hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
722e7225d9070e292c9bc8fbfd1dbbe4a26928c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <arrayfire.h>
#include <af/cuda.h>
extern "C"
__declspec(dllexport)
void treeOffset(double *dtm, double *palshh, double *hans, double *nonf, double *trh, double *dem,
int *ncol, int *n);
//, double *phv_test, double *hans_test, double *blur_test, double *combdata_test);
using namespace af;
__global__ void cuLinearTransformation(const double *cupalshh, double *cuphh, int ncol) {
// printf("This works \n\n");
int col = blockIdx.x * blockDim.x + (threadIdx.x);
int row = blockIdx.y * blockDim.y + (threadIdx.y);
int id = col + row * ncol;
if (col < ncol-1 && row < ncol-1) {
if (cupalshh[id] < 6.9) {
cuphh[id] = 0;
}
else if (cupalshh[id] >= 6.9 && cupalshh[id] <= 7.6) {
cuphh[id] = (cupalshh[id] - 6.9) / 0.7;
}
else if (cupalshh[id] > 7.6) {
cuphh[id] = 1;
}
}
}
__global__ void cuHansTransformation(const double *cuhans, double *culinhans, int ncol) {
int col = blockIdx.x * blockDim.x + (threadIdx.x);
int row = blockIdx.y * blockDim.y + (threadIdx.y);
int id = col + row * ncol;
if (col < ncol-1 && row < ncol-1) {
if (cuhans[id] <= 50) {
culinhans[id] = 0;
}else if (cuhans[id] > 50 && cuhans[id] < 70) {
culinhans[id] = (cuhans[id] - 50) / 20;
}else if (cuhans[id] >= 70) {
culinhans[id] = 1;
}
}
}
__global__ void cuLinearFragmentation(const double *cupalshh, double *culinfragHV, int ncol) {
int col = blockIdx.x * blockDim.x + (threadIdx.x);
int row = blockIdx.y * blockDim.y + (threadIdx.y);
int id = col + row * ncol;
int colo = (blockIdx.x * blockDim.x + (threadIdx.x)) + 2;
int rowo = (blockIdx.y * blockDim.y + (threadIdx.y)) + 2;
int ido = colo + rowo * ncol;
if (col < ncol-1 && row < ncol-1) {
double focalpalshh[] =
{
cupalshh[id],
cupalshh[id + 1],
cupalshh[id + 2],
cupalshh[id + 3],
cupalshh[id + 4],
cupalshh[id + ncol],
cupalshh[id + 1 + ncol],
cupalshh[id + 2 + ncol],
cupalshh[id + 3 + ncol],
cupalshh[id + 4 + ncol],
cupalshh[id + (ncol * 2)],
cupalshh[id + 1 + (ncol * 2)],
cupalshh[id + 2 + (ncol * 2)],
cupalshh[id + 3 + (ncol * 2)],
cupalshh[id + 4 + (ncol * 2)],
cupalshh[id + (ncol * 3)],
cupalshh[id + 1 + (ncol * 3)],
cupalshh[id + 2 + (ncol * 3)],
cupalshh[id + 3 + (ncol * 3)],
cupalshh[id + 4 + (ncol * 3)],
cupalshh[id + (ncol * 4)],
cupalshh[id + 1 + (ncol * 4)],
cupalshh[id + 2 + (ncol * 4)],
cupalshh[id + 3 + (ncol * 4)],
cupalshh[id + 4 + (ncol * 4)]
};
double focalV[25];
double focalH[25];
double cuverFun[25] ={
-1.937, 0.801, 2.274, 0.801, -1.937,
-1.937, 0.801, 2.274, 0.801, -1.937,
-1.937, 0.801, 2.274, 0.801, -1.937,
-1.937, 0.801, 2.274, 0.801, -1.937,
-1.937, 0.801, 2.274, 0.801, -1.937 };
double cuhorFun[25] = {
-1.937, -1.937, -1.937, -1.937, -1.937,
0.801, 0.801, 0.801, 0.801, 0.801,
2.274, 2.274, 2.274, 2.274, 2.274,
0.801, 0.801, 0.801, 0.801, 0.801,
-1.937, -1.937, -1.937, -1.937, -1.937 };
for (int i = 0; i < 25; i++) {
focalV[i] = focalpalshh[i] * cuverFun[i];
focalH[i] = focalpalshh[i] * cuhorFun[i];
}
double sumV = 0.0; // must start at zero before the accumulation below
double sumH = 0.0;
for (int i = 0; i < 25; i++) {
sumV = focalV[i] + sumV;
sumH = focalH[i] + sumH;
}
double linfragHV = sumH + sumV;
if ( (colo > 1 && colo < ncol - 1 ) && ( rowo > 1 && rowo < (ncol-1) ) ){
if (linfragHV < -1.5) {
culinfragHV[ido] = 0;
}
else if (linfragHV >= -1.5 && linfragHV <= 0) {
culinfragHV[ido] = linfragHV/-1.5;
}
else if (linfragHV > 0) {
culinfragHV[ido] =1;
}
}
}
}
__global__ void cuCombData(const double *cuphh, const double *culinfraghv, const double *culinhans, const double *cunonf, double *cucombdat, int ncol) {
int col = blockIdx.x * blockDim.x + (threadIdx.x);
int row = blockIdx.y * blockDim.y + (threadIdx.y);
int id = col + row * ncol;
if (col < ncol-1 && row < ncol-1) {
double sumCombDat = cuphh[id] + culinfraghv[id] + culinhans[id] + (cunonf[id]);
if (sumCombDat <=2.65) {
cucombdat[id] = 0;
}else if (sumCombDat > 2.5) {
cucombdat[id] = 1;
}
// printf("this is the cucombdat %g \n\n", cucombdat[id]);
}
}
af::array gaussianblur(const af::array &in, int window_width, int window_height, int sigma) {
af::array g = af::gaussiankernel(window_width, window_height, sigma, sigma);
return convolve(in, g);
}
__global__ void cuDEM(const double *cucombdata, const double *cutrh, const double *cudtm, double *dem, int ncol) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int id = col + row * ncol;
if ((col < (ncol - 1) && (col > 1)) && (row < (ncol - 1) && (row > 1)) ) {
double treeHeightModel = cucombdata[id] * cutrh[id];
dem[id] = cudtm[id] - (0.5* treeHeightModel);
}
}
void treeOffset(double *dtm, double *palshh, double *hans, double *nonf, double *trh, double *dem,
int *ncol, int *n){
//, double *phv_test, double *hans_test, double *blur_test, double *combdata_test ) {
// initialize device memory variables
double *d_dtm, *d_palshh, *d_hans, *d_nonf, *d_trh, *d_dem, *d_linfragHV, *d_phh, *d_linhans, *d_combdata; //inputs and outputs
//define grid and total window size
dim3 grid(*ncol/20, *ncol/20); // grid of 2D blocks
//these dimensions should be equal to the window dimensions of a raster
// the product of the 2d window dim should not exceed 1024 threads (depending on your GPU)
dim3 block(20, 20);// block of 2D threads
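// A hedged sketch, assuming device 0 is the target GPU: the 1024-thread ceiling
// noted above can be checked at run time before launching, e.g.
//   hipDeviceProp_t prop;
//   hipGetDeviceProperties(&prop, 0);
//   // require block.x * block.y <= prop.maxThreadsPerBlock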
// allocate device memory for computations
// input allocation
hipMalloc((void**)&d_dtm, *n * sizeof(double));
hipMalloc((void**)&d_palshh, *n * sizeof(double));
hipMalloc((void**)&d_hans, *n * sizeof(double));
hipMalloc((void**)&d_nonf, *n * sizeof(double));
hipMalloc((void**)&d_trh, *n * sizeof(double));
// intermediary allocations
hipMalloc((void**)&d_linfragHV, *n * sizeof(double));
hipMalloc((void**)&d_linhans, *n * sizeof(double));
hipMalloc((void**)&d_combdata, *n * sizeof(double));
hipMalloc((void**)&d_phh, *n * sizeof(double));
// output allocation
hipMalloc((void**)&d_dem, *n * sizeof(double));
// copy host memory to allocated device memory
hipMemcpy(d_dtm, dtm, *n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_palshh, palshh, *n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_hans, hans, *n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_nonf, nonf, *n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_trh, trh, *n * sizeof(double), hipMemcpyHostToDevice);
// launch kernel with predefined block and thread numbers
cuLinearTransformation << <grid, block >> >(d_palshh, d_phh, *ncol);
// launch kernel with predefined block and thread numbers
cuHansTransformation << <grid, block >> >(d_hans, d_linhans, *ncol);
// launch kernel with predefined block and thread numbers
cuLinearFragmentation << <grid, block >> >(d_palshh, d_linfragHV, *ncol);
// launch kernel with predefined block and thread numbers
cuCombData << <grid, block >> >(d_phh, d_linfragHV, d_linhans, d_nonf, d_combdata, *ncol);
hipFree(d_nonf);
hipFree(d_linhans);
hipFree(d_linfragHV);
hipFree(d_palshh);
hipFree(d_phh);
hipFree(d_hans);
//conduct array fire operations
af::array d_A(*ncol, *ncol, d_combdata, afDevice);
af::eval(d_A);
af::sync();
af::array d_B = gaussianblur(d_A, 5, 5, 1.5);
//return array fire arrays to device memory
double *d_blurfunction = d_B.device<double>();
// launch kernel with predefined block and thread numbers
cuDEM << <grid, block >> >(d_blurfunction, d_trh, d_dtm, d_dem, *ncol);
hipMemcpy(dem, d_dem, *n * sizeof(double), hipMemcpyDeviceToHost);
/* hipMemcpy(phv_test, d_linfragHV, *n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(hans_test, d_linhans, *n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(combdata_test, d_combdata, *n * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(blur_test, d_blurdata, *n * sizeof(double), hipMemcpyDeviceToHost);
*/
// free memory
hipFree(d_dem);
hipFree(d_dtm);
hipFree(d_trh);
hipFree(d_combdata);
hipFree(d_blurfunction);
} | 722e7225d9070e292c9bc8fbfd1dbbe4a26928c7.cu | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <arrayfire.h>
#include <af/cuda.h>
extern "C"
__declspec(dllexport)
void treeOffset(double *dtm, double *palshh, double *hans, double *nonf, double *trh, double *dem,
int *ncol, int *n);
//, double *phv_test, double *hans_test, double *blur_test, double *combdata_test);
using namespace af;
__global__ void cuLinearTransformation(const double *cupalshh, double *cuphh, int ncol) {
// printf("This works \n\n");
int col = blockIdx.x * blockDim.x + (threadIdx.x);
int row = blockIdx.y * blockDim.y + (threadIdx.y);
int id = col + row * ncol;
if (col < ncol-1 && row < ncol-1) {
if (cupalshh[id] < 6.9) {
cuphh[id] = 0;
}
else if (cupalshh[id] >= 6.9 && cupalshh[id] <= 7.6) {
cuphh[id] = (cupalshh[id] - 6.9) / 0.7;
}
else if (cupalshh[id] > 7.6) {
cuphh[id] = 1;
}
}
}
__global__ void cuHansTransformation(const double *cuhans, double *culinhans, int ncol) {
int col = blockIdx.x * blockDim.x + (threadIdx.x);
int row = blockIdx.y * blockDim.y + (threadIdx.y);
int id = col + row * ncol;
if (col < ncol-1 && row < ncol-1) {
if (cuhans[id] <= 50) {
culinhans[id] = 0;
}else if (cuhans[id] > 50 && cuhans[id] < 70) {
culinhans[id] = (cuhans[id] - 50) / 20;
}else if (cuhans[id] >= 70) {
culinhans[id] = 1;
}
}
}
__global__ void cuLinearFragmentation(const double *cupalshh, double *culinfragHV, int ncol) {
int col = blockIdx.x * blockDim.x + (threadIdx.x);
int row = blockIdx.y * blockDim.y + (threadIdx.y);
int id = col + row * ncol;
int colo = (blockIdx.x * blockDim.x + (threadIdx.x)) + 2;
int rowo = (blockIdx.y * blockDim.y + (threadIdx.y)) + 2;
int ido = colo + rowo * ncol;
if (col < ncol-1 && row < ncol-1) {
double focalpalshh[] =
{
cupalshh[id],
cupalshh[id + 1],
cupalshh[id + 2],
cupalshh[id + 3],
cupalshh[id + 4],
cupalshh[id + ncol],
cupalshh[id + 1 + ncol],
cupalshh[id + 2 + ncol],
cupalshh[id + 3 + ncol],
cupalshh[id + 4 + ncol],
cupalshh[id + (ncol * 2)],
cupalshh[id + 1 + (ncol * 2)],
cupalshh[id + 2 + (ncol * 2)],
cupalshh[id + 3 + (ncol * 2)],
cupalshh[id + 4 + (ncol * 2)],
cupalshh[id + (ncol * 3)],
cupalshh[id + 1 + (ncol * 3)],
cupalshh[id + 2 + (ncol * 3)],
cupalshh[id + 3 + (ncol * 3)],
cupalshh[id + 4 + (ncol * 3)],
cupalshh[id + (ncol * 4)],
cupalshh[id + 1 + (ncol * 4)],
cupalshh[id + 2 + (ncol * 4)],
cupalshh[id + 3 + (ncol * 4)],
cupalshh[id + 4 + (ncol * 4)]
};
double focalV[25];
double focalH[25];
double cuverFun[25] ={
-1.937, 0.801, 2.274, 0.801, -1.937,
-1.937, 0.801, 2.274, 0.801, -1.937,
-1.937, 0.801, 2.274, 0.801, -1.937,
-1.937, 0.801, 2.274, 0.801, -1.937,
-1.937, 0.801, 2.274, 0.801, -1.937 };
double cuhorFun[25] = {
-1.937, -1.937, -1.937, -1.937, -1.937,
0.801, 0.801, 0.801, 0.801, 0.801,
2.274, 2.274, 2.274, 2.274, 2.274,
0.801, 0.801, 0.801, 0.801, 0.801,
-1.937, -1.937, -1.937, -1.937, -1.937 };
for (int i = 0; i < 25; i++) {
focalV[i] = focalpalshh[i] * cuverFun[i];
focalH[i] = focalpalshh[i] * cuhorFun[i];
}
double sumV = 0.0; // must start at zero before the accumulation below
double sumH = 0.0;
for (int i = 0; i < 25; i++) {
sumV = focalV[i] + sumV;
sumH = focalH[i] + sumH;
}
double linfragHV = sumH + sumV;
if ( (colo > 1 && colo < ncol - 1 ) && ( rowo > 1 && rowo < (ncol-1) ) ){
if (linfragHV < -1.5) {
culinfragHV[ido] = 0;
}
else if (linfragHV >= -1.5 && linfragHV <= 0) {
culinfragHV[ido] = linfragHV/-1.5;
}
else if (linfragHV > 0) {
culinfragHV[ido] =1;
}
}
}
}
__global__ void cuCombData(const double *cuphh, const double *culinfraghv, const double *culinhans, const double *cunonf, double *cucombdat, int ncol) {
int col = blockIdx.x * blockDim.x + (threadIdx.x);
int row = blockIdx.y * blockDim.y + (threadIdx.y);
int id = col + row * ncol;
if (col < ncol-1 && row < ncol-1) {
double sumCombDat = cuphh[id] + culinfraghv[id] + culinhans[id] + (cunonf[id]);
if (sumCombDat <=2.65) {
cucombdat[id] = 0;
}else if (sumCombDat > 2.5) {
cucombdat[id] = 1;
}
// printf("this is the cucombdat %g \n\n", cucombdat[id]);
}
}
af::array gaussianblur(const af::array &in, int window_width, int window_height, int sigma) {
af::array g = af::gaussiankernel(window_width, window_height, sigma, sigma);
return convolve(in, g);
}
__global__ void cuDEM(const double *cucombdata, const double *cutrh, const double *cudtm, double *dem, int ncol) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int id = col + row * ncol;
if ((col < (ncol - 1) && (col > 1)) && (row < (ncol - 1) && (row > 1)) ) {
double treeHeightModel = cucombdata[id] * cutrh[id];
dem[id] = cudtm[id] - (0.5* treeHeightModel);
}
}
void treeOffset(double *dtm, double *palshh, double *hans, double *nonf, double *trh, double *dem,
int *ncol, int *n){
//, double *phv_test, double *hans_test, double *blur_test, double *combdata_test ) {
// initialize device memory variables
double *d_dtm, *d_palshh, *d_hans, *d_nonf, *d_trh, *d_dem, *d_linfragHV, *d_phh, *d_linhans, *d_combdata; //inputs and outputs
//define grid and total window size
dim3 grid(*ncol/20, *ncol/20); // grid of 2D blocks
//these dimensions should be equal to the window dimensions of a raster
// the product of the 2d window dim should not exceed 1024 threads (depending on your GPU)
dim3 block(20, 20);// block of 2D threads
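// A hedged sketch, assuming device 0 is the target GPU: the 1024-thread ceiling
// noted above can be checked at run time before launching, e.g.
//   cudaDeviceProp prop;
//   cudaGetDeviceProperties(&prop, 0);
//   // require block.x * block.y <= prop.maxThreadsPerBlock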
// allocate device memory for computations
// input allocation
cudaMalloc((void**)&d_dtm, *n * sizeof(double));
cudaMalloc((void**)&d_palshh, *n * sizeof(double));
cudaMalloc((void**)&d_hans, *n * sizeof(double));
cudaMalloc((void**)&d_nonf, *n * sizeof(double));
cudaMalloc((void**)&d_trh, *n * sizeof(double));
// intermediary allocations
cudaMalloc((void**)&d_linfragHV, *n * sizeof(double));
cudaMalloc((void**)&d_linhans, *n * sizeof(double));
cudaMalloc((void**)&d_combdata, *n * sizeof(double));
cudaMalloc((void**)&d_phh, *n * sizeof(double));
// output allocation
cudaMalloc((void**)&d_dem, *n * sizeof(double));
// copy host memory to allocated device memory
cudaMemcpy(d_dtm, dtm, *n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_palshh, palshh, *n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_hans, hans, *n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_nonf, nonf, *n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_trh, trh, *n * sizeof(double), cudaMemcpyHostToDevice);
// launch kernel with predefined block and thread numbers
cuLinearTransformation << <grid, block >> >(d_palshh, d_phh, *ncol);
// launch kernel with predefined block and thread numbers
cuHansTransformation << <grid, block >> >(d_hans, d_linhans, *ncol);
// launch kernel with predefined block and thread numbers
cuLinearFragmentation << <grid, block >> >(d_palshh, d_linfragHV, *ncol);
// launch kernel with predefined block and thread numbers
cuCombData << <grid, block >> >(d_phh, d_linfragHV, d_linhans, d_nonf, d_combdata, *ncol);
cudaFree(d_nonf);
cudaFree(d_linhans);
cudaFree(d_linfragHV);
cudaFree(d_palshh);
cudaFree(d_phh);
cudaFree(d_hans);
//conduct array fire operations
af::array d_A(*ncol, *ncol, d_combdata, afDevice);
af::eval(d_A);
af::sync();
af::array d_B = gaussianblur(d_A, 5, 5, 1.5);
//return array fire arrays to device memory
double *d_blurfunction = d_B.device<double>();
// launch kernel with predefined block and thread numbers
cuDEM << <grid, block >> >(d_blurfunction, d_trh, d_dtm, d_dem, *ncol);
cudaMemcpy(dem, d_dem, *n * sizeof(double), cudaMemcpyDeviceToHost);
/* cudaMemcpy(phv_test, d_linfragHV, *n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(hans_test, d_linhans, *n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(combdata_test, d_combdata, *n * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(blur_test, d_blurdata, *n * sizeof(double), cudaMemcpyDeviceToHost);
*/
// free memory
cudaFree(d_dem);
cudaFree(d_dtm);
cudaFree(d_trh);
cudaFree(d_combdata);
cudaFree(d_blurfunction);
} |
a908aeaf4a2ae397253eba7d728e8fed8cdcc38f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* c-ray_main.cu
*
* The simplified GPU ray-tracing program.
* For this version, we use some __device__ or __const__ global
* arrays and variables instead of always pass them as arguments for device functions.
*
* usage:
* compile with the Makefile
* run as: ./a.out -i scene_file -o img_file -v -w 800 -h 600 -r 1
* -v: print time info
* -i: input scene file path
* -o: output image file path, with .ppm suffix
* -w: output image x resolution
* -h: output image y resolution
* -r: the number of rays per pixel
*
*/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <cerrno>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "../../common/helper_getopt.h"
#include "../../common/helper_timer.h"
#include "../../common/helper_err.h"
#include "c-ray_kernel_v2.h"
/*
* global variables
*/
vec2_t urand[NRAN];
int irand[NRAN];
int xres = 800;
int yres = 600;
int rays_per_pixel = 1;
FTYPE aspect = 1.333333;
sphere_t *obj_list;
vec3_t lights[MAX_LIGHTS];
int lnum = 0;
camera_t cam;
sphere_array_t obj_array; // used to replace the obj_list
int obj_count;
__device__ global_vars g_vars_d;
__device__ vec3_t lights_d[MAX_LIGHTS];
__device__ vec2_t urand_d[NRAN];
__device__ int irand_d[NRAN];
void load_scene(FILE *fp);
int main(int argc, char**argv){
FILE *infile = NULL;
FILE *outfile = NULL;
uint32_t *pixels;
bool printTime = false;
char* infile_path;
char* outfile_path;
/* Parse command line options */
int opt;
extern char *optarg;
extern int optind;
while ( (opt=getopt(argc,argv,"w:h:r:i:o:v"))!= EOF) {
switch (opt) {
case 'v': printTime = true;
break;
case 'i': infile_path=optarg;
break;
case 'o': outfile_path = optarg;
break;
case 'w': xres = atoi(optarg);
break;
case 'h': yres = atoi(optarg);
break;
case 'r': rays_per_pixel = atoi(optarg);
break;
case ':':
std::cerr<<"Option -"<<(char)optopt<<" requires an operand\n"<<std::endl;
break;
case '?':
std::cerr<<"Unrecognized option: -"<<(char)optopt<<std::endl;
break;
default:
break;
}
}
if((infile = fopen((const char*)infile_path, "rb"))==NULL){
std::cerr<<"Error, cannot open scene file."<<std::endl;
}
if((outfile = fopen((const char*)outfile_path, "wb"))==NULL){
std::cerr<<"Error, cannot open output file."<<std::endl;
}
if(!(pixels = (uint32_t*)malloc(xres * yres * sizeof *pixels))) {
perror("pixel buffer allocation failed");
return EXIT_FAILURE;
}
/*
* read the input scene file
*/
if(infile ==NULL){
std::cerr<<"Need an input scene file."<<std::endl;
exit(1);
}
load_scene(infile);
/* initialize the random number tables for the jitter */
for(int i=0; i<NRAN; i++) urand[i].x = (double)rand() / RAND_MAX - 0.5;
for(int i=0; i<NRAN; i++) urand[i].y = (double)rand() / RAND_MAX - 0.5;
for(int i=0; i<NRAN; i++) irand[i] = (int)(NRAN * ((double)rand() / RAND_MAX));
struct global_vars g_vars;
g_vars.aspect = aspect;
g_vars.cam = cam;
g_vars.lnum = lnum;
g_vars.obj_count = obj_count;
g_vars.xres = xres;
g_vars.yres = yres;
g_vars.rays_per_pixel = rays_per_pixel;
hipSetDevice(0);
/*
* define and create necessary data structures on GPU
*/
unsigned int *pixels_d;
sphere_array_t obj_array_d;
checkCudaErr(hipMalloc(&pixels_d, sizeof(unsigned int)*xres*yres));
checkCudaErr(hipMalloc(&(obj_array_d.pos), sizeof(vec3_t)*obj_count));
checkCudaErr(hipMalloc(&(obj_array_d.mat), sizeof(material_t)*obj_count));
checkCudaErr(hipMalloc(&(obj_array_d.rad), sizeof(FTYPE)*obj_count));
/*
* copy data in
*/
double copyinTime = 0;
double t1, t2;
t1 = getTime();
checkCudaErr(
hipMemcpy(obj_array_d.pos, obj_array.pos, sizeof(vec3_t)*obj_count,
hipMemcpyHostToDevice));
checkCudaErr(
hipMemcpy(obj_array_d.mat, obj_array.mat, sizeof(material_t)*obj_count,
hipMemcpyHostToDevice));
checkCudaErr(
hipMemcpy(obj_array_d.rad, obj_array.rad, sizeof(FTYPE)*obj_count,
hipMemcpyHostToDevice));
checkCudaErr(
hipMemcpyToSymbol(lights_d, lights, sizeof(vec3_t)*MAX_LIGHTS, 0,
hipMemcpyHostToDevice));
checkCudaErr(
hipMemcpyToSymbol(urand_d, urand, sizeof(vec2_t)*NRAN, 0,
hipMemcpyHostToDevice));
checkCudaErr(
hipMemcpyToSymbol(irand_d, irand, sizeof(int)*NRAN, 0,
hipMemcpyHostToDevice));
checkCudaErr(
hipMemcpyToSymbol(g_vars_d, (void*)&g_vars, sizeof(g_vars), 0,
hipMemcpyHostToDevice));
t2 = getTime();
copyinTime = t2-t1;
/*
* call kernel
*
* kernel has a recursive function call and may call stack overflow,
* so need to set the cuda user stack frame to a larger size
*
* perthread stack size should < local memory size(64KB) and
* < (gpu_mem_size)/(#sm_of_gpu)/(#threads_per_sm)
*
*/
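/*
* Worked example with hypothetical hardware numbers (6 GB of device memory,
* 15 SMs, 2048 resident threads per SM): 6e9 / 15 / 2048 is roughly 195 KB of
* stack per thread, so the 4 KB requested below sits well inside both bounds.
*/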
size_t stacksize;
hipThreadGetLimit(&stacksize, hipLimitStackSize);
//std::cout<<stacksize<<std::endl; //default is 1024
stacksize = 1024*4;
hipThreadSetLimit(hipLimitStackSize, stacksize);
dim3 block(16, 8, 1);
dim3 grid((xres+block.x-1)/block.x, (yres+block.y-1)/block.y,1);
double kernelrunTime = 0;
t1= getTime();
hipLaunchKernelGGL(( render_kernel), dim3(grid), dim3(block), 0, 0,
pixels_d,
obj_array_d);
checkCudaErr(hipDeviceSynchronize());
t2 = getTime();
kernelrunTime =t2-t1;
/*
* copy data out
*/
double copyoutTime = 0;
t1 = getTime();
checkCudaErr(
hipMemcpy(pixels, pixels_d, sizeof(unsigned int)*xres*yres,
hipMemcpyDeviceToHost));
t2 = getTime();
copyoutTime = t2 - t1;
hipFree(pixels_d);
hipFree(obj_array_d.pos);
hipFree(obj_array_d.mat);
hipFree(obj_array_d.rad);
hipDeviceReset();
/*
* output the image
*/
if(outfile != NULL){
fprintf(outfile, "P6\n%d %d\n255\n", xres, yres);
for(int i=0; i<xres * yres; i++) {
fputc((pixels[i] >> RSHIFT) & 0xff, outfile);
fputc((pixels[i] >> GSHIFT) & 0xff, outfile);
fputc((pixels[i] >> BSHIFT) & 0xff, outfile);
}
fflush(outfile);
}
if(infile) fclose(infile);
if(outfile) fclose(outfile);
if(obj_array.pos){
free(obj_array.pos);
free(obj_array.mat);
free(obj_array.rad);
}
free(pixels);
double totaltime = kernelrunTime + copyinTime + copyoutTime;
if(printTime){
std::cout<<"Scene info:"<<std::endl;
std::cout<<"\tNumber of objects: "<<obj_count<<std::endl;
std::cout<<"\tNumber of lights: "<<lnum<<std::endl;
std::cout<<"\tTracing depth: "<<MAX_RAY_DEPTH<<std::endl;
std::cout<<"\tRays per pixel: "<<rays_per_pixel<<std::endl;
std::cout<<"Output image: "<<xres<<" X "<<yres<<std::endl;
std::cout<<"Total time: "<<std::fixed<<std::setprecision(4)<<totaltime<<std::endl;
std::cout<<"Kernel Runtime: "<<std::fixed<<std::setprecision(4)<<kernelrunTime<<"(s)"<<std::endl;
std::cout<<"copy in time: "<<std::fixed<<std::setprecision(4)<<copyinTime<<"(s)"<<std::endl;
std::cout<<"copy out time: "<<std::fixed<<std::setprecision(4)<<copyoutTime<<"(s)"<<std::endl;
}
return 0;
}
/* Load the scene from an extremely simple scene description file */
#define DELIM " \t\n"
void load_scene(FILE *fp) {
char line[256], *ptr, type;
obj_list = (sphere_t*)malloc(sizeof(struct sphere));
obj_list->next = NULL;
obj_count=0;
while((ptr = fgets(line, 256, fp))) {
int i;
vec3_t pos, col;
FTYPE rad, spow, refl;
while(*ptr == ' ' || *ptr == '\t') ptr++;
if(*ptr == '#' || *ptr == '\n') continue;
if(!(ptr = strtok(line, DELIM))) continue;
type = *ptr;
for(i=0; i<3; i++) {
if(!(ptr = strtok(0, DELIM))) break;
*((FTYPE*)&pos.x + i) = (FTYPE)atof(ptr);
}
if(type == 'l') {
lights[lnum++] = pos;
continue;
}
if(!(ptr = strtok(0, DELIM))) continue;
rad = atof(ptr);
for(i=0; i<3; i++) {
if(!(ptr = strtok(0, DELIM))) break;
*((FTYPE*)&col.x + i) = (FTYPE)atof(ptr);
}
if(type == 'c') {
cam.pos = pos;
cam.targ = col;
cam.fov = rad;
continue;
}
if(!(ptr = strtok(0, DELIM))) continue;
spow = (FTYPE)atof(ptr);
if(!(ptr = strtok(0, DELIM))) continue;
refl = (FTYPE)atof(ptr);
if(type == 's') {
obj_count++;
struct sphere *sph = (sphere_t*)malloc(sizeof *sph);
sph->next = obj_list->next;
obj_list->next = sph;
sph->pos = pos;
sph->rad = rad;
sph->mat.col = col;
sph->mat.spow = spow;
sph->mat.refl = refl;
} else {
fprintf(stderr, "unknown type: %c\n", type);
}
}
/*
* change the sphere linked list to an array
*/
obj_array.pos = (vec3_t*)malloc(sizeof(vec3_t)*obj_count);
obj_array.mat = (material_t*)malloc(sizeof(material_t)*obj_count);
obj_array.rad = (FTYPE*)malloc(sizeof(FTYPE)*obj_count);
sphere_t *p1 = obj_list->next;
sphere_t *p2 = p1;
int i=0;
while(p1!=NULL){
obj_array.pos[i] = p1->pos;
obj_array.rad[i] = p1->rad;
obj_array.mat[i].col = p1->mat.col;
obj_array.mat[i].spow = p1->mat.spow;
obj_array.mat[i].refl = p1->mat.refl;
p2 = p1;
p1 = p1->next;
free(p2);
i++;
}
obj_list->next = NULL;
free(obj_list);
}
| a908aeaf4a2ae397253eba7d728e8fed8cdcc38f.cu | /*
* c-ray_main.cu
*
* The simplified GPU ray-tracing program.
* For this version, we use some __device__ or __const__ global
* arrays and variables instead of always passing them as arguments to device functions.
*
* usage:
* compile with the Makefile
* run as: ./a.out -i scene_file -o img_file -v -w 800 -h 600 -r 1
* -v: print time info
* -i: input scene file path
* -o: output image file path, with .ppm suffix
* -w: output image x resolution
* -h: output image y resolution
* -r: the number of rays per pixel
*
*/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <cerrno>
#include <stdint.h>
#include <cuda_runtime.h>
#include "../../common/helper_getopt.h"
#include "../../common/helper_timer.h"
#include "../../common/helper_err.h"
#include "c-ray_kernel_v2.h"
/*
* global variables
*/
vec2_t urand[NRAN];
int irand[NRAN];
int xres = 800;
int yres = 600;
int rays_per_pixel = 1;
FTYPE aspect = 1.333333;
sphere_t *obj_list;
vec3_t lights[MAX_LIGHTS];
int lnum = 0;
camera_t cam;
sphere_array_t obj_array; // used to replace the obj_list
int obj_count;
__device__ global_vars g_vars_d;
__device__ vec3_t lights_d[MAX_LIGHTS];
__device__ vec2_t urand_d[NRAN];
__device__ int irand_d[NRAN];
void load_scene(FILE *fp);
int main(int argc, char**argv){
FILE *infile = NULL;
FILE *outfile = NULL;
uint32_t *pixels;
bool printTime = false;
char* infile_path;
char* outfile_path;
/* Parse command line options */
int opt;
extern char *optarg;
extern int optind;
while ( (opt=getopt(argc,argv,"w:h:r:i:o:v"))!= EOF) {
switch (opt) {
case 'v': printTime = true;
break;
case 'i': infile_path=optarg;
break;
case 'o': outfile_path = optarg;
break;
case 'w': xres = atoi(optarg);
break;
case 'h': yres = atoi(optarg);
break;
case 'r': rays_per_pixel = atoi(optarg);
break;
case ':':
std::cerr<<"Option -"<<(char)optopt<<" requires an operand\n"<<std::endl;
break;
case '?':
std::cerr<<"Unrecognized option: -"<<(char)optopt<<std::endl;
break;
default:
break;
}
}
if((infile = fopen((const char*)infile_path, "rb"))==NULL){
std::cerr<<"Error, cannot open scene file."<<std::endl;
}
if((outfile = fopen((const char*)outfile_path, "wb"))==NULL){
std::cerr<<"Error, cannot open output file."<<std::endl;
}
if(!(pixels = (uint32_t*)malloc(xres * yres * sizeof *pixels))) {
perror("pixel buffer allocation failed");
return EXIT_FAILURE;
}
/*
* read the input scene file
*/
if(infile ==NULL){
std::cerr<<"Need an input scene file."<<std::endl;
exit(1);
}
load_scene(infile);
/* initialize the random number tables for the jitter */
for(int i=0; i<NRAN; i++) urand[i].x = (double)rand() / RAND_MAX - 0.5;
for(int i=0; i<NRAN; i++) urand[i].y = (double)rand() / RAND_MAX - 0.5;
for(int i=0; i<NRAN; i++) irand[i] = (int)(NRAN * ((double)rand() / RAND_MAX));
struct global_vars g_vars;
g_vars.aspect = aspect;
g_vars.cam = cam;
g_vars.lnum = lnum;
g_vars.obj_count = obj_count;
g_vars.xres = xres;
g_vars.yres = yres;
g_vars.rays_per_pixel = rays_per_pixel;
cudaSetDevice(0);
/*
* define and create necessary data structures on GPU
*/
unsigned int *pixels_d;
sphere_array_t obj_array_d;
checkCudaErr(cudaMalloc(&pixels_d, sizeof(unsigned int)*xres*yres));
checkCudaErr(cudaMalloc(&(obj_array_d.pos), sizeof(vec3_t)*obj_count));
checkCudaErr(cudaMalloc(&(obj_array_d.mat), sizeof(material_t)*obj_count));
checkCudaErr(cudaMalloc(&(obj_array_d.rad), sizeof(FTYPE)*obj_count));
/*
* copy data in
*/
double copyinTime = 0;
double t1, t2;
t1 = getTime();
checkCudaErr(
cudaMemcpy(obj_array_d.pos, obj_array.pos, sizeof(vec3_t)*obj_count,
cudaMemcpyHostToDevice));
checkCudaErr(
cudaMemcpy(obj_array_d.mat, obj_array.mat, sizeof(material_t)*obj_count,
cudaMemcpyHostToDevice));
checkCudaErr(
cudaMemcpy(obj_array_d.rad, obj_array.rad, sizeof(FTYPE)*obj_count,
cudaMemcpyHostToDevice));
checkCudaErr(
cudaMemcpyToSymbol(lights_d, lights, sizeof(vec3_t)*MAX_LIGHTS, 0,
cudaMemcpyHostToDevice));
checkCudaErr(
cudaMemcpyToSymbol(urand_d, urand, sizeof(vec2_t)*NRAN, 0,
cudaMemcpyHostToDevice));
checkCudaErr(
cudaMemcpyToSymbol(irand_d, irand, sizeof(int)*NRAN, 0,
cudaMemcpyHostToDevice));
checkCudaErr(
cudaMemcpyToSymbol(g_vars_d, (void*)&g_vars, sizeof(g_vars), 0,
cudaMemcpyHostToDevice));
t2 = getTime();
copyinTime = t2-t1;
/*
* call kernel
*
* kernel has a recursive function call and may call stack overflow,
* so need to set the cuda user stack frame to a larger size
*
* perthread stack size should < local memory size(64KB) and
* < (gpu_mem_size)/(#sm_of_gpu)/(#threads_per_sm)
*
*/
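/*
* Worked example with hypothetical hardware numbers (6 GB of device memory,
* 15 SMs, 2048 resident threads per SM): 6e9 / 15 / 2048 is roughly 195 KB of
* stack per thread, so the 4 KB requested below sits well inside both bounds.
*/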
size_t stacksize;
cudaThreadGetLimit(&stacksize, cudaLimitStackSize);
//std::cout<<stacksize<<std::endl; //default is 1024
stacksize = 1024*4;
cudaThreadSetLimit(cudaLimitStackSize, stacksize);
dim3 block(16, 8, 1);
dim3 grid((xres+block.x-1)/block.x, (yres+block.y-1)/block.y,1);
double kernelrunTime = 0;
t1= getTime();
render_kernel<<<grid, block>>>(
pixels_d,
obj_array_d);
checkCudaErr(cudaDeviceSynchronize());
t2 = getTime();
kernelrunTime =t2-t1;
/*
* copy data out
*/
double copyoutTime = 0;
t1 = getTime();
checkCudaErr(
cudaMemcpy(pixels, pixels_d, sizeof(unsigned int)*xres*yres,
cudaMemcpyDeviceToHost));
t2 = getTime();
copyoutTime = t2 - t1;
cudaFree(pixels_d);
cudaFree(obj_array_d.pos);
cudaFree(obj_array_d.mat);
cudaFree(obj_array_d.rad);
cudaDeviceReset();
/*
* output the image
*/
if(outfile != NULL){
fprintf(outfile, "P6\n%d %d\n255\n", xres, yres);
for(int i=0; i<xres * yres; i++) {
fputc((pixels[i] >> RSHIFT) & 0xff, outfile);
fputc((pixels[i] >> GSHIFT) & 0xff, outfile);
fputc((pixels[i] >> BSHIFT) & 0xff, outfile);
}
fflush(outfile);
}
if(infile) fclose(infile);
if(outfile) fclose(outfile);
if(obj_array.pos){
free(obj_array.pos);
free(obj_array.mat);
free(obj_array.rad);
}
free(pixels);
double totaltime = kernelrunTime + copyinTime + copyoutTime;
if(printTime){
std::cout<<"Scene info:"<<std::endl;
std::cout<<"\tNumber of objects: "<<obj_count<<std::endl;
std::cout<<"\tNumber of lights: "<<lnum<<std::endl;
std::cout<<"\tTracing depth: "<<MAX_RAY_DEPTH<<std::endl;
std::cout<<"\tRays per pixel: "<<rays_per_pixel<<std::endl;
std::cout<<"Output image: "<<xres<<" X "<<yres<<std::endl;
std::cout<<"Total time: "<<std::fixed<<std::setprecision(4)<<totaltime<<std::endl;
std::cout<<"Kernel Runtime: "<<std::fixed<<std::setprecision(4)<<kernelrunTime<<"(s)"<<std::endl;
std::cout<<"copy in time: "<<std::fixed<<std::setprecision(4)<<copyinTime<<"(s)"<<std::endl;
std::cout<<"copy out time: "<<std::fixed<<std::setprecision(4)<<copyoutTime<<"(s)"<<std::endl;
}
return 0;
}
/* Load the scene from an extremely simple scene description file */
#define DELIM " \t\n"
void load_scene(FILE *fp) {
char line[256], *ptr, type;
obj_list = (sphere_t*)malloc(sizeof(struct sphere));
obj_list->next = NULL;
obj_count=0;
while((ptr = fgets(line, 256, fp))) {
int i;
vec3_t pos, col;
FTYPE rad, spow, refl;
while(*ptr == ' ' || *ptr == '\t') ptr++;
if(*ptr == '#' || *ptr == '\n') continue;
if(!(ptr = strtok(line, DELIM))) continue;
type = *ptr;
for(i=0; i<3; i++) {
if(!(ptr = strtok(0, DELIM))) break;
*((FTYPE*)&pos.x + i) = (FTYPE)atof(ptr);
}
if(type == 'l') {
lights[lnum++] = pos;
continue;
}
if(!(ptr = strtok(0, DELIM))) continue;
rad = atof(ptr);
for(i=0; i<3; i++) {
if(!(ptr = strtok(0, DELIM))) break;
*((FTYPE*)&col.x + i) = (FTYPE)atof(ptr);
}
if(type == 'c') {
cam.pos = pos;
cam.targ = col;
cam.fov = rad;
continue;
}
if(!(ptr = strtok(0, DELIM))) continue;
spow = (FTYPE)atof(ptr);
if(!(ptr = strtok(0, DELIM))) continue;
refl = (FTYPE)atof(ptr);
if(type == 's') {
obj_count++;
struct sphere *sph = (sphere_t*)malloc(sizeof *sph);
sph->next = obj_list->next;
obj_list->next = sph;
sph->pos = pos;
sph->rad = rad;
sph->mat.col = col;
sph->mat.spow = spow;
sph->mat.refl = refl;
} else {
fprintf(stderr, "unknown type: %c\n", type);
}
}
/*
* change the sphere linked list to an array
*/
obj_array.pos = (vec3_t*)malloc(sizeof(vec3_t)*obj_count);
obj_array.mat = (material_t*)malloc(sizeof(material_t)*obj_count);
obj_array.rad = (FTYPE*)malloc(sizeof(FTYPE)*obj_count);
sphere_t *p1 = obj_list->next;
sphere_t *p2 = p1;
int i=0;
while(p1!=NULL){
obj_array.pos[i] = p1->pos;
obj_array.rad[i] = p1->rad;
obj_array.mat[i].col = p1->mat.col;
obj_array.mat[i].spow = p1->mat.spow;
obj_array.mat[i].refl = p1->mat.refl;
p2 = p1;
p1 = p1->next;
free(p2);
i++;
}
obj_list->next = NULL;
free(obj_list);
}
|
4ebef2a4b6143d5a76e92f67f0c20181cfbc4263.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "mytime.h"
#define N 4096
// This program works for ODD value of N as well
void fill_values(int *array, int n) {
time_t t;
srand((unsigned) time(&t));
for(int j=0; j<n; j++) {
//array[j]=j;
if(j % 2 == 0)
array[j] = rand() % 200;
else{
//array[j] = j - j * rand() % (rand() * 200);
array[j] = rand() % 100 + rand() % 100;
}
}
}
void printValues(int *a , int n){
int sum=0;
for(int i=0;i <n; i++){
//printf("%4d", a[i]);
sum += a[i];
}
//printf("\n");
//printf("SUM: %d\n", sum);
}
__global__ void dk(int *a, int n, int iteration){
unsigned id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < n){
unsigned index = id * (1 << (iteration+1));
//unsigned index = id * (int)pow(2.0, iteration+1);
unsigned shift_index = (1 << iteration);
//unsigned shift_index = (int)pow(2.0, iteration);
//if( n % 2 == 1 && id == n-1 ){
// a[index] = a[index] + a[ index + shift_index ] +a[index + 2 * shift_index ];
//printf("a[%d] = a[%d] + a[%d] + a[%d] = %d \n",index,index, index + shift_index ,index + 2 * shift_index , a[index]);
//}
//else{
a[index] = a[index] + a[ index + shift_index];
//printf("a[%d] = a[%d] + a[%d] = %d\n",index, index, index + shift_index , a[index]);
//}
}
//__syncthreads();
}
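// Illustrative trace of the indexing above (hypothetical N = 8; the program below
// uses N = 4096), showing the interleaved-addressing tree reduction:
//   iteration 0 (n = 4): a[0]+=a[1], a[2]+=a[3], a[4]+=a[5], a[6]+=a[7]
//   iteration 1 (n = 2): a[0]+=a[2], a[4]+=a[6]
//   iteration 2 (n = 1): a[0]+=a[4]  -> a[0] holds the total sum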
int main(int argc, char** argv){
double start, end;
unsigned bytes = sizeof(int) * N;
//~ unsigned sumbytes = sizeof(int) ;
int *a = (int *) malloc (bytes);
//~ int *sum= (int *) malloc (sumbytes);
fill_values(a,N); // fills random values
//CPUTimer cputimer;
//cputimer.Start();
start = rtclock();
printValues(a,N); // prints and finds cpu sum as well.
//cputimer.Stop();
end = rtclock();
printtime("Sequential time: ", start, end);
//printf("The sequential code ran in %f ms\n", cputimer.Elapsed()*1000);
int *da;
//~ int *dsum; // removing it as a[0] stores the final result
hipMalloc(&da, bytes);
//~ hipMalloc(&dsum, sumbytes);
hipMemset(da, 0,bytes);
//~ hipMemset(dsum, 0,sumbytes);
hipMemcpy(da,a,bytes, hipMemcpyHostToDevice);
unsigned numThreads = 1024;
//GPUTimer gputimer;
//gputimer.Start();
start = rtclock();
for(int i = N/2, j=0; i > 0; j++,i=i/2) {
hipLaunchKernelGGL(( dk), dim3((ceil((float)i/numThreads))) , dim3(numThreads), 0, 0, da, i, j);
//hipDeviceSynchronize();
}
//dk<<< 1, i>>>(da, i, j);
//~ dk<<< 1, N/2>>>(da, N, dsum, 0);
//~ dk<<< 1, N/4>>>(da, N, dsum, 1);
//~ dk<<< 1, N/8>>>(da, N, dsum, 2);
//~ printValues(a,N);
//gputimer.Stop();
end = rtclock();
//printf("The Parallel code ran in %f ms\n", gputimer.Elapsed()*1000);
printtime("Parallel time: ", start, end);
//~ hipMemcpy(sum,dsum,sumbytes, hipMemcpyDeviceToHost);
hipMemcpy(a,da, bytes, hipMemcpyDeviceToHost);
printf("Gpu sum %d\n", a[0]);
hipFree(da);
//~ hipFree(dsum);
return 0;
}
| 4ebef2a4b6143d5a76e92f67f0c20181cfbc4263.cu | #include <cuda.h>
#include <stdio.h>
#include "mytime.h"
#define N 4096
// This program works for ODD value of N as well
void fill_values(int *array, int n) {
time_t t;
srand((unsigned) time(&t));
for(int j=0; j<n; j++) {
//array[j]=j;
if(j % 2 == 0)
array[j] = rand() % 200;
else{
//array[j] = j - j * rand() % (rand() * 200);
array[j] = rand() % 100 + rand() % 100;
}
}
}
void printValues(int *a , int n){
int sum=0;
for(int i=0;i <n; i++){
//printf("%4d", a[i]);
sum += a[i];
}
//printf("\n");
//printf("SUM: %d\n", sum);
}
__global__ void dk(int *a, int n, int iteration){
unsigned id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < n){
unsigned index = id * (1 << (iteration+1));
//unsigned index = id * (int)pow(2.0, iteration+1);
unsigned shift_index = (1 << iteration);
//unsigned shift_index = (int)pow(2.0, iteration);
//if( n % 2 == 1 && id == n-1 ){
// a[index] = a[index] + a[ index + shift_index ] +a[index + 2 * shift_index ];
//printf("a[%d] = a[%d] + a[%d] + a[%d] = %d \n",index,index, index + shift_index ,index + 2 * shift_index , a[index]);
//}
//else{
a[index] = a[index] + a[ index + shift_index];
//printf("a[%d] = a[%d] + a[%d] = %d\n",index, index, index + shift_index , a[index]);
//}
}
//__syncthreads();
}
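// Illustrative trace of the indexing above (hypothetical N = 8; the program below
// uses N = 4096), showing the interleaved-addressing tree reduction:
//   iteration 0 (n = 4): a[0]+=a[1], a[2]+=a[3], a[4]+=a[5], a[6]+=a[7]
//   iteration 1 (n = 2): a[0]+=a[2], a[4]+=a[6]
//   iteration 2 (n = 1): a[0]+=a[4]  -> a[0] holds the total sum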
int main(int argc, char** argv){
double start, end;
unsigned bytes = sizeof(int) * N;
//~ unsigned sumbytes = sizeof(int) ;
int *a = (int *) malloc (bytes);
//~ int *sum= (int *) malloc (sumbytes);
fill_values(a,N); // fills random values
//CPUTimer cputimer;
//cputimer.Start();
start = rtclock();
printValues(a,N); // prints and finds cpu sum as well.
//cputimer.Stop();
end = rtclock();
printtime("Sequential time: ", start, end);
//printf("The sequential code ran in %f ms\n", cputimer.Elapsed()*1000);
int *da;
//~ int *dsum; // removing it as a[0] stores the final result
cudaMalloc(&da, bytes);
//~ cudaMalloc(&dsum, sumbytes);
cudaMemset(da, 0,bytes);
//~ cudaMemset(dsum, 0,sumbytes);
cudaMemcpy(da,a,bytes, cudaMemcpyHostToDevice);
unsigned numThreads = 1024;
//GPUTimer gputimer;
//gputimer.Start();
start = rtclock();
for(int i = N/2, j=0; i > 0; j++,i=i/2) {
dk<<< (ceil((float)i/numThreads)) , numThreads>>>(da, i, j);
//cudaDeviceSynchronize();
}
//dk<<< 1, i>>>(da, i, j);
//~ dk<<< 1, N/2>>>(da, N, dsum, 0);
//~ dk<<< 1, N/4>>>(da, N, dsum, 1);
//~ dk<<< 1, N/8>>>(da, N, dsum, 2);
//~ printValues(a,N);
//gputimer.Stop();
end = rtclock();
//printf("The Parallel code ran in %f ms\n", gputimer.Elapsed()*1000);
printtime("Parallel time: ", start, end);
//~ cudaMemcpy(sum,dsum,sumbytes, cudaMemcpyDeviceToHost);
cudaMemcpy(a,da, bytes, cudaMemcpyDeviceToHost);
printf("Gpu sum %d\n", a[0]);
cudaFree(da);
//~ cudaFree(dsum);
return 0;
}
|
040397f66716941f02119e9fce97dcb38eb1b7a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*------------------------------------------------------------------------------
Copyright © 2016 by Nicola Bombieri
XLib is provided under the terms of The MIT License (MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------*/
/**
* @author Federico Busato
* University of Verona, Dept. of Computer Science
* [email protected]
*/
#include "Base/Device/Util/global_sync.cuh"
#include "Base/Device/Util/definition.cuh"
#include "Base/Device/Util/cuda_util.cuh"
namespace xlib {
__device__ unsigned int GlobalSyncArray[MAX_BLOCKSIZE];
namespace {
__global__ void globalSyncResetKernel() {
if (threadIdx.x < blockDim.x)
GlobalSyncArray[threadIdx.x] = 0;
}
} //@anonymous
void globalSyncReset() {
hipLaunchKernelGGL(( globalSyncResetKernel), dim3(1), dim3(MAX_BLOCKSIZE), 0, 0, );
__CUDA_ERROR("GlobalSync : Reset");
}
} //@xlib
| 040397f66716941f02119e9fce97dcb38eb1b7a3.cu | /*------------------------------------------------------------------------------
Copyright © 2016 by Nicola Bombieri
XLib is provided under the terms of The MIT License (MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------*/
/**
* @author Federico Busato
* University of Verona, Dept. of Computer Science
* [email protected]
*/
#include "Base/Device/Util/global_sync.cuh"
#include "Base/Device/Util/definition.cuh"
#include "Base/Device/Util/cuda_util.cuh"
namespace xlib {
__device__ unsigned int GlobalSyncArray[MAX_BLOCKSIZE];
namespace {
__global__ void globalSyncResetKernel() {
if (threadIdx.x < blockDim.x)
GlobalSyncArray[threadIdx.x] = 0;
}
} //@anonymous
void globalSyncReset() {
globalSyncResetKernel<<<1, MAX_BLOCKSIZE>>>();
__CUDA_ERROR("GlobalSync : Reset");
}
} //@xlib
|
a48cad97a713a0a308c70ae6785fec5f61514931.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string>
#define Thread 1024
__global__ void KernelGaussSeidel(float* deviceA, float* deviceF, float* deviceX0, float*
deviceX1, int N) {
float sum = 0.0f;
for (int i = 0; i < N; i++) deviceX0[i] = deviceX1[i];
int t = blockIdx.x * blockDim.x + threadIdx.x;
for (int j = 0; j < t; j++) sum += deviceA[j + t * N] * deviceX1[j];
for (int j = t + 1; j < N; j++) sum += deviceA[j + t * N] * deviceX0[j];
deviceX1[t] = (deviceF[t] - sum) / deviceA[t + t * N];
}
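// The kernel above performs one Gauss-Seidel-style update per thread t:
//   x1[t] = ( f[t] - sum_{j<t} A[t][j]*x1[j] - sum_{j>t} A[t][j]*x0[j] ) / A[t][t]
// Note that every thread first copies all of X1 into X0 while other threads may
// already be rewriting X1, so the split between "new" (j < t) and "old" (j > t)
// values is not strictly enforced.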
__global__ void EpsGaussSeidel(float *deviceX0, float *deviceX1, float *delta, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
delta[i] += fabs(deviceX0[i] - deviceX1[i]);
deviceX0[i] = deviceX1[i];
}
int main() {
srand(time(NULL));
float *hostA, *hostX, *hostX0, *hostX1, *hostF, *hostDelta;
float sum, eps;
float EPS = 1.e-5;
int N = 10000;
float size = N * N;
int count;
int Block = (int)ceil((float)N / Thread);
dim3 Blocks(Block);
dim3 Threads(Thread);
int Num_diag = 0.5f*(int)N*0.3f;
float mem_sizeA = sizeof(float)*size;
unsigned int mem_sizeX = sizeof(float)*(N);
hostA = (float*)malloc(mem_sizeA);
hostF = (float*)malloc(mem_sizeX);
hostX = (float*)malloc(mem_sizeX);
hostX0 = (float*)malloc(mem_sizeX);
hostX1 = (float*)malloc(mem_sizeX);
hostDelta = (float*)malloc(mem_sizeX);
for (int i = 0; i < size; i++) {
hostA[i] = 0.0f;
}
for (int i = 0; i < N; i++) {
hostA[i + i * N] = rand() % 50 + 1.0f*N;
}
for (int k = 1; k < Num_diag + 1; k++) {
for (int i = 0; i < N - k; i++) {
hostA[i + k + i * N] = rand() % 5;
hostA[i + (i + k)*N] = rand() % 5;
}
}
for (int i = 0; i < N; i++) {
hostX[i] = rand() % 50;
hostX0[i] = 1.0f;
hostDelta[i] = 0.0f;
}
for (int i = 0; i < N; i++) {
sum = 0.0f;
for (int j = 0; j < N; j++) sum += hostA[j + i * N] * hostX[j];
hostF[i] = sum;
}
float *deviceA, *deviceX0, *deviceX1, *deviceF, *delta;
for (int i = 0; i < N; i++) hostX1[i] = 1.0f;
hipMalloc((void**)&deviceA, mem_sizeA);
hipMalloc((void**)&deviceF, mem_sizeX);
hipMalloc((void**)&deviceX0, mem_sizeX);
hipMalloc((void**)&deviceX1, mem_sizeX);
hipMalloc((void**)&delta, mem_sizeX);
hipMemcpy(deviceA, hostA, mem_sizeA, hipMemcpyHostToDevice);
hipMemcpy(deviceF, hostF, mem_sizeX, hipMemcpyHostToDevice);
hipMemcpy(deviceX0, hostX0, mem_sizeX, hipMemcpyHostToDevice);
hipMemcpy(deviceX1, hostX1, mem_sizeX, hipMemcpyHostToDevice);
count = 0; eps = 1.0f;
while (eps > EPS)
{
count++;
hipMemcpy(delta, hostDelta, mem_sizeX, hipMemcpyHostToDevice);
KernelGaussSeidel << < Blocks, Threads >> > (deviceA, deviceF, deviceX0,
deviceX1, N);
EpsGaussSeidel << < Blocks, Threads >> > (deviceX0, deviceX1, delta, N);
hipMemcpy(hostDelta, delta, mem_sizeX, hipMemcpyDeviceToHost);
eps = 0.0f;
for (int j = 0; j < N; j++) {
eps += hostDelta[j]; hostDelta[j] = 0;
}
eps = eps / N;
}
hipMemcpy(hostX1, deviceX1, mem_sizeX, hipMemcpyDeviceToHost);
hipFree(deviceA);
hipFree(deviceF);
hipFree(deviceX0);
hipFree(deviceX1);
free(hostA);
free(hostF);
free(hostX0);
free(hostX1);
free(hostX);
free(hostDelta);
return 0;
}
| a48cad97a713a0a308c70ae6785fec5f61514931.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string>
#define Thread 1024
__global__ void KernelGaussSeidel(float* deviceA, float* deviceF, float* deviceX0, float*
deviceX1, int N) {
float sum = 0.0f;
for (int i = 0; i < N; i++) deviceX0[i] = deviceX1[i];
int t = blockIdx.x * blockDim.x + threadIdx.x;
for (int j = 0; j < t; j++) sum += deviceA[j + t * N] * deviceX1[j];
for (int j = t + 1; j < N; j++) sum += deviceA[j + t * N] * deviceX0[j];
deviceX1[t] = (deviceF[t] - sum) / deviceA[t + t * N];
}
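// The kernel above performs one Gauss-Seidel-style update per thread t:
//   x1[t] = ( f[t] - sum_{j<t} A[t][j]*x1[j] - sum_{j>t} A[t][j]*x0[j] ) / A[t][t]
// Note that every thread first copies all of X1 into X0 while other threads may
// already be rewriting X1, so the split between "new" (j < t) and "old" (j > t)
// values is not strictly enforced.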
__global__ void EpsGaussSeidel(float *deviceX0, float *deviceX1, float *delta, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
delta[i] += fabs(deviceX0[i] - deviceX1[i]);
deviceX0[i] = deviceX1[i];
}
int main() {
srand(time(NULL));
float *hostA, *hostX, *hostX0, *hostX1, *hostF, *hostDelta;
float sum, eps;
float EPS = 1.e-5;
int N = 10000;
float size = N * N;
int count;
int Block = (int)ceil((float)N / Thread);
dim3 Blocks(Block);
dim3 Threads(Thread);
int Num_diag = 0.5f*(int)N*0.3f;
float mem_sizeA = sizeof(float)*size;
unsigned int mem_sizeX = sizeof(float)*(N);
hostA = (float*)malloc(mem_sizeA);
hostF = (float*)malloc(mem_sizeX);
hostX = (float*)malloc(mem_sizeX);
hostX0 = (float*)malloc(mem_sizeX);
hostX1 = (float*)malloc(mem_sizeX);
hostDelta = (float*)malloc(mem_sizeX);
for (int i = 0; i < size; i++) {
hostA[i] = 0.0f;
}
for (int i = 0; i < N; i++) {
hostA[i + i * N] = rand() % 50 + 1.0f*N;
}
for (int k = 1; k < Num_diag + 1; k++) {
for (int i = 0; i < N - k; i++) {
hostA[i + k + i * N] = rand() % 5;
hostA[i + (i + k)*N] = rand() % 5;
}
}
for (int i = 0; i < N; i++) {
hostX[i] = rand() % 50;
hostX0[i] = 1.0f;
hostDelta[i] = 0.0f;
}
for (int i = 0; i < N; i++) {
sum = 0.0f;
for (int j = 0; j < N; j++) sum += hostA[j + i * N] * hostX[j];
hostF[i] = sum;
}
float *deviceA, *deviceX0, *deviceX1, *deviceF, *delta;
for (int i = 0; i < N; i++) hostX1[i] = 1.0f;
cudaMalloc((void**)&deviceA, mem_sizeA);
cudaMalloc((void**)&deviceF, mem_sizeX);
cudaMalloc((void**)&deviceX0, mem_sizeX);
cudaMalloc((void**)&deviceX1, mem_sizeX);
cudaMalloc((void**)&delta, mem_sizeX);
cudaMemcpy(deviceA, hostA, mem_sizeA, cudaMemcpyHostToDevice);
cudaMemcpy(deviceF, hostF, mem_sizeX, cudaMemcpyHostToDevice);
cudaMemcpy(deviceX0, hostX0, mem_sizeX, cudaMemcpyHostToDevice);
cudaMemcpy(deviceX1, hostX1, mem_sizeX, cudaMemcpyHostToDevice);
count = 0; eps = 1.0f;
while (eps > EPS)
{
count++;
cudaMemcpy(delta, hostDelta, mem_sizeX, cudaMemcpyHostToDevice);
KernelGaussSeidel << < Blocks, Threads >> > (deviceA, deviceF, deviceX0,
deviceX1, N);
EpsGaussSeidel << < Blocks, Threads >> > (deviceX0, deviceX1, delta, N);
cudaMemcpy(hostDelta, delta, mem_sizeX, cudaMemcpyDeviceToHost);
eps = 0.0f;
for (int j = 0; j < N; j++) {
eps += hostDelta[j]; hostDelta[j] = 0;
}
eps = eps / N;
}
cudaMemcpy(hostX1, deviceX1, mem_sizeX, cudaMemcpyDeviceToHost);
cudaFree(deviceA);
cudaFree(deviceF);
cudaFree(deviceX0);
cudaFree(deviceX1);
free(hostA);
free(hostF);
free(hostX0);
free(hostX1);
free(hostX);
free(hostDelta);
return 0;
}
|
88038f61733514256e2f20d95c0eecfef26dfebb.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu"
#else
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(real* out, real* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(real* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); \
if (self_ == src) { \
if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(hipGetLastError()); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
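// For illustration: IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, THCNumerics<real>::log, Real)
// expands to a Tensor_log_Real_Op functor plus a THCTensor_(log) wrapper that applies
// THCNumerics<real>::log element-wise, in place when self_ == src and otherwise into
// a freshly resized self_.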
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<real>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(round, THCNumerics<real>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
THAssert(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THAssert(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
#endif
THC_API void
THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self += src2
if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += value * src2
if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 + src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 + value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self -= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += -value * src2
if (!THC_pointwiseApply2(state, self_, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 - src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 - value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self *= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self *= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
#endif
| 88038f61733514256e2f20d95c0eecfef26dfebb.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu"
#else
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(real* out, real* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(real* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THAssert(THCTensor_(checkGPU)(state, 2, self_, src)); \
if (self_ == src) { \
if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(cudaGetLastError()); \
}
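// Illustrative expansion (a sketch, not generated output): for example,
// IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(abs, THCNumerics<real>::abs, Real) defines a
// functor Tensor_abs_Real_Op that applies THCNumerics<real>::abs element-wise,
// plus a THCTensor_(abs)(state, self_, src) wrapper that updates self_ in place
// when self_ == src and otherwise resizes self_ to match src before writing.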
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<real>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(round, THCNumerics<real>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
THAssert(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THAssert(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
#endif
THC_API void
THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self += src2
if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += value * src2
if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 + src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 + value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self -= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += -value * src2
if (!THC_pointwiseApply2(state, self_, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 - src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 - value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self *= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self /= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 / src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
#endif
|
0a6c597e01754ec64d3a69a63ceefcc3d1adbb0c.hip | // !!! This is a file automatically generated by hipify!!!
#define GET_PROF
#include "hip/hip_runtime.h"
#include "include/helper/cuda/cublas_error_check.h"
#include "include/helper/cuda/cusparse_error_check.h"
#include <assert.h>
#include <stdio.h>
#include "basic_operations.hpp"
#include "dataStructures/hd_data.hpp"
#include "dataStructures/matrix_element.hpp"
#include "helper/cuda/cuda_error_check.h"
#include "helper/cuda/cuda_reduction_operation.hpp"
#include "helper/cuda/cuda_thread_manager.hpp"
chrono_profiler profDot;
void print_dotprofiler() { profDot.print(); }
hipsparseHandle_t cusparseHandle = NULL;
hipblasHandle_t cublasHandle = NULL;
void dot(d_spmatrix &d_mat, d_vector &x, d_vector &result, bool synchronize) {
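    // cuSPARSE generic-API SpMV: build the matrix/vector descriptors, query the
    // required workspace with hipsparseSpMV_bufferSize, allocate it, then run
    // hipsparseSpMV so that result = 1.0 * d_mat * x + 0.0 * result, and finally
    // destroy the descriptors.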
if (!cusparseHandle)
cusparseErrchk(hipsparseCreate(&cusparseHandle));
assert(d_mat.is_device && x.is_device && result.is_device);
if (&x == &result) {
printf("Error: X and Result vectors should not be the same instance\n");
return;
}
T one = 1.0;
T zero = 0.0;
size_t size = 0;
T *buffer;
auto mat_descr = d_mat.make_sp_descriptor();
auto x_descr = x.make_descriptor();
auto res_descr = result.make_descriptor();
cusparseErrchk(hipsparseSpMV_bufferSize(
cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &one, mat_descr,
x_descr, &zero, res_descr, T_Cuda, HIPSPARSE_MV_ALG_DEFAULT, &size));
if (size > 0)
printf("Alert! size >0 \n");
hipMalloc(&buffer, size);
cusparseErrchk(hipsparseSpMV(
cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &one, mat_descr,
x_descr, &zero, res_descr, T_Cuda, HIPSPARSE_MV_ALG_DEFAULT, buffer));
hipsparseDestroyDnVec(x_descr);
hipsparseDestroyDnVec(res_descr);
hipsparseDestroySpMat(mat_descr);
}
d_vector buffer(0);
__global__ void dotK(d_vector &x, d_vector &y, d_vector &buffer) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= x.n)
return;
buffer.data[i] = x.data[i] * y.data[i];
return;
}
void dot(d_vector &x, d_vector &y, T &result, bool synchronize) {
assert(x.is_device && y.is_device);
assert(x.n == y.n);
if (!cublasHandle)
cublasErrchk(hipblasCreate(&cublasHandle));
#ifdef USE_DOUBLE
cublasErrchk(hipblasDdot(cublasHandle, x.n, x.data, 1, y.data, 1, &result));
#else
    cublasErrchk(hipblasSdot(cublasHandle, x.n, x.data, 1, y.data, 1, &result));
#endif
// dim3Pair threadblock = make1DThreadBlock(x.n);
// if (buffer.n < x.n)
// buffer.resize(x.n);
// else
// buffer.n = x.n;
// dotK<<<threadblock.block, threadblock.thread>>>(*(d_vector *)x._device,
// *(d_vector *)y._device,
// *(d_vector
// *)buffer._device);
// ReductionOperation(buffer, sum);
// hipMemcpy(&result, buffer.data, sizeof(T), hipMemcpyDeviceToDevice);
if (synchronize) {
gpuErrchk(hipDeviceSynchronize());
} else
return;
}
__global__ void vector_sumK(d_vector &a, d_vector &b, T &alpha, d_vector &c) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= a.n)
return;
c.data[i] = a.data[i] + b.data[i] * alpha;
};
void vector_sum(d_vector &a, d_vector &b, T &alpha, d_vector &c,
bool synchronize) {
assert(a.is_device && b.is_device);
assert(a.n == b.n);
dim3Pair threadblock = make1DThreadBlock(a.n);
hipLaunchKernelGGL(( vector_sumK), dim3(threadblock.block), dim3(threadblock.thread), 0, 0,
*(d_vector *)a._device, *(d_vector *)b._device, alpha,
*(d_vector *)c._device);
if (synchronize)
gpuErrchk(hipDeviceSynchronize());
}
void vector_sum(d_vector &a, d_vector &b, d_vector &c, bool synchronize) {
hd_data<T> alpha(1.0);
vector_sum(a, b, alpha(true), c, synchronize);
}
__device__ inline bool IsSup(matrix_elm &it_a, matrix_elm &it_b) {
return (it_a.i == it_b.i && it_a.j > it_b.j) || it_a.i > it_b.i;
};
__device__ inline bool IsEqu(matrix_elm &it_a, matrix_elm &it_b) {
return (it_a.i == it_b.i && it_a.j == it_b.j);
};
__device__ inline bool IsSupEqu(matrix_elm &it_a, matrix_elm &it_b) {
return (it_a.i == it_b.i && it_a.j >= it_b.j) || it_a.i > it_b.i;
};
__global__ void sum_nnzK(d_spmatrix &a, d_spmatrix &b, int *nnz) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= a.rows)
return;
if (i == 0)
nnz[0] = 0;
matrix_elm it_a(a.rowPtr[i], &a);
matrix_elm it_b(b.rowPtr[i], &b);
nnz[i + 1] = 0;
while (it_a.i == i || it_b.i == i) {
if (IsEqu(it_a, it_b)) {
it_a.next();
it_b.next();
nnz[i + 1] += 1;
} else if (IsSup(it_a, it_b)) {
it_b.next();
nnz[i + 1] += 1;
} else if (IsSup(it_b, it_a)) {
it_a.next();
nnz[i + 1] += 1;
} else {
printf("Error! Nobody was iterated in sum_nnzK function.\n");
return;
}
}
return;
}
__global__ void set_valuesK(d_spmatrix &a, d_spmatrix &b, T &alpha,
d_spmatrix &c) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= c.rows)
return;
matrix_elm it_a(a.rowPtr[i], &a);
matrix_elm it_b(b.rowPtr[i], &b);
int k = c.rowPtr[i];
if (k >= c.nnz) {
printf("Error! In matrix sum, at %i\n", i);
return;
}
while (it_a.i == i || it_b.i == i) {
if (IsEqu(it_a, it_b)) {
c.colPtr[k] = it_a.j;
c.data[k] = it_a.val[0] + alpha * it_b.val[0];
it_a.next();
it_b.next();
} else if (IsSup(it_a, it_b)) {
c.colPtr[k] = it_b.j;
c.data[k] = alpha * it_b.val[0];
it_b.next();
} else if (IsSup(it_b, it_a)) {
c.colPtr[k] = it_a.j;
c.data[k] = it_a.val[0];
it_a.next();
} else {
printf("Error! Nobody was iterated in sum_nnzK function.\n");
return;
}
k++;
}
return;
}
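// matrix_sum below computes c = a + alpha * b for CSR operands in two passes:
// sum_nnzK counts the merged non-zeros of each row, ReductionIncreasing turns
// those counts into the row-pointer prefix sum, and set_valuesK walks both rows
// again to merge column indices and values into c.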
void matrix_sum(d_spmatrix &a, d_spmatrix &b, T &alpha, d_spmatrix &c) {
    // This method is only implemented for the specific case of CSR matrices
assert(a.type == CSR && b.type == CSR);
assert(a.rows == b.rows && a.cols == b.cols);
c.rows = 1 * a.rows;
c.cols = 1 * a.cols;
c.type = CSR;
int *nnzs;
hipMalloc(&nnzs, sizeof(int) * (a.rows + 1));
auto tb = make1DThreadBlock(a.rows);
hipLaunchKernelGGL(( sum_nnzK), dim3(tb.block), dim3(tb.thread), 0, 0, *a._device, *b._device, nnzs);
ReductionIncreasing(nnzs, a.rows + 1);
hd_data<int> nnz(&nnzs[a.rows], true);
c.set_nnz(nnz());
gpuErrchk(hipMemcpy(c.rowPtr, nnzs, sizeof(int) * (a.rows + 1),
hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( set_valuesK), dim3(tb.block), dim3(tb.thread), 0, 0, *a._device, *b._device, alpha,
*c._device);
gpuErrchk(hipDeviceSynchronize());
return;
}
void matrix_sum(d_spmatrix &a, d_spmatrix &b, d_spmatrix &c) {
hd_data<T> d_alpha(1.0);
matrix_sum(a, b, d_alpha(true), c);
}
__global__ void scalar_multK(T *data, int n, T &alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
data[i] *= alpha;
return;
}
void scalar_mult(d_spmatrix &a, T &alpha) {
assert(a.is_device);
dim3Pair threadblock = make1DThreadBlock(a.nnz);
hipLaunchKernelGGL(( scalar_multK), dim3(threadblock.block), dim3(threadblock.thread), 0, 0, a.data, a.nnz,
alpha);
}
void scalar_mult(d_vector &a, T &alpha) {
assert(a.is_device);
dim3Pair threadblock = make1DThreadBlock(a.n);
hipLaunchKernelGGL(( scalar_multK), dim3(threadblock.block), dim3(threadblock.thread), 0, 0, a.data, a.n, alpha);
}
| 0a6c597e01754ec64d3a69a63ceefcc3d1adbb0c.cu | #define GET_PROF
#include "cuda_runtime.h"
#include "include/helper/cuda/cublas_error_check.h"
#include "include/helper/cuda/cusparse_error_check.h"
#include <assert.h>
#include <stdio.h>
#include "basic_operations.hpp"
#include "dataStructures/hd_data.hpp"
#include "dataStructures/matrix_element.hpp"
#include "helper/cuda/cuda_error_check.h"
#include "helper/cuda/cuda_reduction_operation.hpp"
#include "helper/cuda/cuda_thread_manager.hpp"
chrono_profiler profDot;
void print_dotprofiler() { profDot.print(); }
cusparseHandle_t cusparseHandle = NULL;
cublasHandle_t cublasHandle = NULL;
void dot(d_spmatrix &d_mat, d_vector &x, d_vector &result, bool synchronize) {
if (!cusparseHandle)
cusparseErrchk(cusparseCreate(&cusparseHandle));
assert(d_mat.is_device && x.is_device && result.is_device);
if (&x == &result) {
printf("Error: X and Result vectors should not be the same instance\n");
return;
}
T one = 1.0;
T zero = 0.0;
size_t size = 0;
T *buffer;
auto mat_descr = d_mat.make_sp_descriptor();
auto x_descr = x.make_descriptor();
auto res_descr = result.make_descriptor();
cusparseErrchk(cusparseSpMV_bufferSize(
cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, &one, mat_descr,
x_descr, &zero, res_descr, T_Cuda, CUSPARSE_MV_ALG_DEFAULT, &size));
if (size > 0)
printf("Alert! size >0 \n");
cudaMalloc(&buffer, size);
cusparseErrchk(cusparseSpMV(
cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, &one, mat_descr,
x_descr, &zero, res_descr, T_Cuda, CUSPARSE_MV_ALG_DEFAULT, buffer));
cusparseDestroyDnVec(x_descr);
cusparseDestroyDnVec(res_descr);
cusparseDestroySpMat(mat_descr);
}
d_vector buffer(0);
__global__ void dotK(d_vector &x, d_vector &y, d_vector &buffer) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= x.n)
return;
buffer.data[i] = x.data[i] * y.data[i];
return;
}
void dot(d_vector &x, d_vector &y, T &result, bool synchronize) {
assert(x.is_device && y.is_device);
assert(x.n == y.n);
if (!cublasHandle)
cublasErrchk(cublasCreate(&cublasHandle));
#ifdef USE_DOUBLE
cublasErrchk(cublasDdot(cublasHandle, x.n, x.data, 1, y.data, 1, &result));
#else
    cublasErrchk(cublasSdot(cublasHandle, x.n, x.data, 1, y.data, 1, &result));
#endif
// dim3Pair threadblock = make1DThreadBlock(x.n);
// if (buffer.n < x.n)
// buffer.resize(x.n);
// else
// buffer.n = x.n;
// dotK<<<threadblock.block, threadblock.thread>>>(*(d_vector *)x._device,
// *(d_vector *)y._device,
// *(d_vector
// *)buffer._device);
// ReductionOperation(buffer, sum);
// cudaMemcpy(&result, buffer.data, sizeof(T), cudaMemcpyDeviceToDevice);
if (synchronize) {
gpuErrchk(cudaDeviceSynchronize());
} else
return;
}
__global__ void vector_sumK(d_vector &a, d_vector &b, T &alpha, d_vector &c) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= a.n)
return;
c.data[i] = a.data[i] + b.data[i] * alpha;
};
void vector_sum(d_vector &a, d_vector &b, T &alpha, d_vector &c,
bool synchronize) {
assert(a.is_device && b.is_device);
assert(a.n == b.n);
dim3Pair threadblock = make1DThreadBlock(a.n);
vector_sumK<<<threadblock.block, threadblock.thread>>>(
*(d_vector *)a._device, *(d_vector *)b._device, alpha,
*(d_vector *)c._device);
if (synchronize)
gpuErrchk(cudaDeviceSynchronize());
}
void vector_sum(d_vector &a, d_vector &b, d_vector &c, bool synchronize) {
hd_data<T> alpha(1.0);
vector_sum(a, b, alpha(true), c, synchronize);
}
__device__ inline bool IsSup(matrix_elm &it_a, matrix_elm &it_b) {
return (it_a.i == it_b.i && it_a.j > it_b.j) || it_a.i > it_b.i;
};
__device__ inline bool IsEqu(matrix_elm &it_a, matrix_elm &it_b) {
return (it_a.i == it_b.i && it_a.j == it_b.j);
};
__device__ inline bool IsSupEqu(matrix_elm &it_a, matrix_elm &it_b) {
return (it_a.i == it_b.i && it_a.j >= it_b.j) || it_a.i > it_b.i;
};
__global__ void sum_nnzK(d_spmatrix &a, d_spmatrix &b, int *nnz) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= a.rows)
return;
if (i == 0)
nnz[0] = 0;
matrix_elm it_a(a.rowPtr[i], &a);
matrix_elm it_b(b.rowPtr[i], &b);
nnz[i + 1] = 0;
while (it_a.i == i || it_b.i == i) {
if (IsEqu(it_a, it_b)) {
it_a.next();
it_b.next();
nnz[i + 1] += 1;
} else if (IsSup(it_a, it_b)) {
it_b.next();
nnz[i + 1] += 1;
} else if (IsSup(it_b, it_a)) {
it_a.next();
nnz[i + 1] += 1;
} else {
printf("Error! Nobody was iterated in sum_nnzK function.\n");
return;
}
}
return;
}
__global__ void set_valuesK(d_spmatrix &a, d_spmatrix &b, T &alpha,
d_spmatrix &c) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= c.rows)
return;
matrix_elm it_a(a.rowPtr[i], &a);
matrix_elm it_b(b.rowPtr[i], &b);
int k = c.rowPtr[i];
if (k >= c.nnz) {
printf("Error! In matrix sum, at %i\n", i);
return;
}
while (it_a.i == i || it_b.i == i) {
if (IsEqu(it_a, it_b)) {
c.colPtr[k] = it_a.j;
c.data[k] = it_a.val[0] + alpha * it_b.val[0];
it_a.next();
it_b.next();
} else if (IsSup(it_a, it_b)) {
c.colPtr[k] = it_b.j;
c.data[k] = alpha * it_b.val[0];
it_b.next();
} else if (IsSup(it_b, it_a)) {
c.colPtr[k] = it_a.j;
c.data[k] = it_a.val[0];
it_a.next();
} else {
printf("Error! Nobody was iterated in sum_nnzK function.\n");
return;
}
k++;
}
return;
}
void matrix_sum(d_spmatrix &a, d_spmatrix &b, T &alpha, d_spmatrix &c) {
    // This method is only implemented for the specific case of CSR matrices
assert(a.type == CSR && b.type == CSR);
assert(a.rows == b.rows && a.cols == b.cols);
c.rows = 1 * a.rows;
c.cols = 1 * a.cols;
c.type = CSR;
int *nnzs;
cudaMalloc(&nnzs, sizeof(int) * (a.rows + 1));
auto tb = make1DThreadBlock(a.rows);
sum_nnzK<<<tb.block, tb.thread>>>(*a._device, *b._device, nnzs);
ReductionIncreasing(nnzs, a.rows + 1);
hd_data<int> nnz(&nnzs[a.rows], true);
c.set_nnz(nnz());
gpuErrchk(cudaMemcpy(c.rowPtr, nnzs, sizeof(int) * (a.rows + 1),
cudaMemcpyDeviceToDevice));
set_valuesK<<<tb.block, tb.thread>>>(*a._device, *b._device, alpha,
*c._device);
gpuErrchk(cudaDeviceSynchronize());
return;
}
void matrix_sum(d_spmatrix &a, d_spmatrix &b, d_spmatrix &c) {
hd_data<T> d_alpha(1.0);
matrix_sum(a, b, d_alpha(true), c);
}
__global__ void scalar_multK(T *data, int n, T &alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
data[i] *= alpha;
return;
}
void scalar_mult(d_spmatrix &a, T &alpha) {
assert(a.is_device);
dim3Pair threadblock = make1DThreadBlock(a.nnz);
scalar_multK<<<threadblock.block, threadblock.thread>>>(a.data, a.nnz,
alpha);
}
void scalar_mult(d_vector &a, T &alpha) {
assert(a.is_device);
dim3Pair threadblock = make1DThreadBlock(a.n);
scalar_multK<<<threadblock.block, threadblock.thread>>>(a.data, a.n, alpha);
}
|
21ae2a08b14391378fe3ebec5d8356a01fe6bc4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "jacketSDK.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include <iostream>
#define TPB 96
//first do this without the turbulence model...then with.
__global__ void channel2D_VW_PE_MRT_ts(double * fOut, double * fIn, int * inl,
int * onl, int * snl, double * ux_p,
double rho_out, double * omega_op,int Nx, int Ny){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
int nnodes = Nx*Ny;
if(tid<nnodes){
double fi1,fi2,fi3,fi4,fi5,fi6,fi7,fi8,fi9;
double fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9;
double fo1,fo2,fo3,fo4,fo5,fo6,fo7,fo8,fo9;
__shared__ double omega[9][9];
    //load the particle distribution functions for this lattice point.
fi1=fIn[tid];
fi2=fIn[nnodes+tid];
fi3=fIn[2*nnodes+tid];
fi4=fIn[3*nnodes+tid];
fi5=fIn[4*nnodes+tid];
fi6=fIn[5*nnodes+tid];
fi7=fIn[6*nnodes+tid];
fi8=fIn[7*nnodes+tid];
fi9=fIn[8*nnodes+tid];
//load portion of omega into shared memory
if(threadIdx.x<81){
int col=threadIdx.x/9;
int row = threadIdx.x-col*9;
omega[row][col]=*(omega_op+9*col+row);
}
//compute rho
double rho = fi1+fi2+fi3+fi4+fi5+fi6+fi7+fi8+fi9;
//compute velocity
double ux = (1/rho)*(fi2+fi6+fi9 - (fi7+fi4+fi8));
double uy = (1/rho)*(fi6+fi3+fi7 - (fi8+fi5+fi9));
//if tid is an inlet node, set inlet Macroscopic and microscopic BC
if(inl[tid]==1){
ux = ux_p[tid];
uy = 0.0;
rho = (1./(1-ux))*(fi1+fi3+fi5+2.0*(fi4+fi7+fi8));
//now set microscopic bc on the inlet
fi2 = fi4+(2./3.)*rho*ux;
fi6=fi8+(0.5)*(fi5-fi3)+(0.5)*rho*uy+(1./6.)*rho*ux;
fi9=fi7+0.5*(fi3-fi5)-0.5*rho*uy+(1./6.)*rho*ux;
}//if(inlet_node_list[tid]==1)...
//if tid is an outlet node, set outlet Macroscopic and microscopic BC
if(onl[tid]==1){
//macroscopic BC
rho = rho_out;
ux = -1. + (1./rho)*(fi1+fi3+fi5+2.0*(fi2+fi6+fi9));
uy = 0.;
//microscopic BC
fi4 = fi2-(2./3.)*rho*ux;
fi8=fi6+0.5*(fi3-fi5)+0.5*rho*uy-(1./6.)*rho*ux;
fi7 = fi9+0.5*(fi5-fi3)+0.5*rho*uy-(1./6.)*rho*ux;
}//if(outlet_node_list[tid]==1)...
//everybody compute fEq
//speed 1
double w = 4./9.;
double cu = 0.;
fe1 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 2
w = 1./9.;
cu = 3.0*ux;
fe2 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 3
cu = 3.0*uy;
fe3 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 4
cu = -3.0*ux;
fe4 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 5
cu = -3.0*uy;
fe5 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 6
w = 1./36.;
cu = 3.0*(ux+uy);
fe6 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 7
cu = 3.0*(-ux+uy);
fe7 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 8
cu = 3.0*(-ux-uy);
fe8 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 9
cu= 3.0*(ux-uy);
fe9 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//really...I need fe to equal the non-equilibrium part...
fe1=fi1-fe1;
fe2=fi2-fe2;
fe3=fi3-fe3;
fe4=fi4-fe4;
fe5=fi5-fe5;
fe6=fi6-fe6;
fe7=fi7-fe7;
fe8=fi8-fe8;
fe9=fi9-fe9;
//MRT relaxation
__syncthreads();//make sure omega is loaded...
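    //MRT collision step: fo_k = fi_k - sum_j fneq_j * omega[j][k], i.e. the
    //non-equilibrium parts fe1..fe9 computed above are relaxed through the 9x9
    //MRT operator held in shared memory.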
fo1=fi1-(fe1*omega[0][0]+fe2*omega[1][0]+fe3*omega[2][0]+fe4*omega[3][0]+fe5*omega[4][0]+fe6*omega[5][0]+fe7*omega[6][0]+fe8*omega[7][0]+fe9*omega[8][0]);
fo2=fi2-(fe1*omega[0][1]+fe2*omega[1][1]+fe3*omega[2][1]+fe4*omega[3][1]+fe5*omega[4][1]+fe6*omega[5][1]+fe7*omega[6][1]+fe8*omega[7][1]+fe9*omega[8][1]);
fo3=fi3-(fe1*omega[0][2]+fe2*omega[1][2]+fe3*omega[2][2]+fe4*omega[3][2]+fe5*omega[4][2]+fe6*omega[5][2]+fe7*omega[6][2]+fe8*omega[7][2]+fe9*omega[8][2]);
fo4=fi4-(fe1*omega[0][3]+fe2*omega[1][3]+fe3*omega[2][3]+fe4*omega[3][3]+fe5*omega[4][3]+fe6*omega[5][3]+fe7*omega[6][3]+fe8*omega[7][3]+fe9*omega[8][3]);
fo5=fi5-(fe1*omega[0][4]+fe2*omega[1][4]+fe3*omega[2][4]+fe4*omega[3][4]+fe5*omega[4][4]+fe6*omega[5][4]+fe7*omega[6][4]+fe8*omega[7][4]+fe9*omega[8][4]);
fo6=fi6-(fe1*omega[0][5]+fe2*omega[1][5]+fe3*omega[2][5]+fe4*omega[3][5]+fe5*omega[4][5]+fe6*omega[5][5]+fe7*omega[6][5]+fe8*omega[7][5]+fe9*omega[8][5]);
fo7=fi7-(fe1*omega[0][6]+fe2*omega[1][6]+fe3*omega[2][6]+fe4*omega[3][6]+fe5*omega[4][6]+fe6*omega[5][6]+fe7*omega[6][6]+fe8*omega[7][6]+fe9*omega[8][6]);
fo8=fi8-(fe1*omega[0][7]+fe2*omega[1][7]+fe3*omega[2][7]+fe4*omega[3][7]+fe5*omega[4][7]+fe6*omega[5][7]+fe7*omega[6][7]+fe8*omega[7][7]+fe9*omega[8][7]);
fo9=fi9-(fe1*omega[0][8]+fe2*omega[1][8]+fe3*omega[2][8]+fe4*omega[3][8]+fe5*omega[4][8]+fe6*omega[5][8]+fe7*omega[6][8]+fe8*omega[7][8]+fe9*omega[8][8]);
//bounce-back nodes do this instead...
if(snl[tid]==1){
fo1=fi1;
fo2=fi4; fo4=fi2;
fo3=fi5; fo5=fi3;
fo6=fi8; fo8=fi6;
fo7=fi9; fo9=fi7;
ux = 0.; uy = 0.;
}//if(solid_node_list[tid]==1)...
// stream the result...
//compute the local stream vector...
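    //x,y are the 1-based lattice coordinates of this node; xe/yn and xw/ys are
    //the east/north and west/south neighbours with periodic wrap-around, and
    //each post-collision value is written to the node it streams into.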
int x;
int y;
int yn;
int ys;
int xe;
int xw;
//int dir;
int dof_num; //int f_num;
x = tid%Nx+1;
y = ((tid+1)-x+1)/Nx + 1;
yn = y%Ny+1;
xe = x%Nx+1;
if(y==1){
ys = Ny;
}else{
ys = y-1;
}
if(x==1){
xw=Nx;
}else{
xw=x-1;
}
dof_num = Nx*(y-1)+x;
fOut[dof_num-1]=fo1;
dof_num=Nx*(y-1)+xe;
fOut[nnodes+dof_num-1]=fo2;
dof_num=Nx*(yn-1)+x;
fOut[2*nnodes+dof_num-1]=fo3;
dof_num=Nx*(y-1)+xw;
fOut[3*nnodes+dof_num-1]=fo4;
dof_num=Nx*(ys-1)+x;
fOut[4*nnodes+dof_num-1]=fo5;
dof_num=Nx*(yn-1)+xe;
fOut[5*nnodes+dof_num-1]=fo6;
dof_num=Nx*(yn-1)+xw;
fOut[6*nnodes+dof_num-1]=fo7;
dof_num=Nx*(ys-1)+xw;
fOut[7*nnodes+dof_num-1]=fo8;
dof_num=Nx*(ys-1)+xe;
fOut[8*nnodes+dof_num-1]=fo9;
}
}
err_t jktFunction(int nlhs, mxArray * plhs[], int nrhs, mxArray * prhs[]){
if(nrhs!=10)
return err("Usage: channel2D_VW_PE_MRT_DPts(fOut,fIn,inl,onl,snl,ux_p,rho_out,omega_op,Nx,Ny");
mxArray * m_fOut = prhs[0];
mxArray * m_fIn = prhs[1];
mxArray * m_inl = prhs[2];
mxArray * m_onl = prhs[3];
mxArray * m_snl = prhs[4];
mxArray * m_ux_p = prhs[5];
double rho_out = mxGetScalar(prhs[6]);
mxArray * m_omega_op = prhs[7];
int Nx = mxGetScalar(prhs[8]);
int Ny = mxGetScalar(prhs[9]);
int nnodes = Nx*Ny;
double * fOut; double * fIn; int * inl; int * onl; int * snl;
double * ux_p;
double * omega_op;
jkt_mem((void**)&fOut,m_fOut);
jkt_mem((void**)&fIn,m_fIn);
jkt_mem((void**)&inl,m_inl);
jkt_mem((void**)&onl,m_onl);
jkt_mem((void**)&snl,m_snl);
jkt_mem((void**)&ux_p,m_ux_p);
jkt_mem((void**)&omega_op,m_omega_op);
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((nnodes+TPB-1)/TPB,1,1);
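  // one thread per lattice node; (nnodes+TPB-1)/TPB rounds the grid size up so
  // that a final partial block covers the remaining nodes.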
hipLaunchKernelGGL(( channel2D_VW_PE_MRT_ts), dim3(GRIDS),dim3(BLOCKS), 0, 0, fOut,fIn,inl,onl,snl,ux_p,rho_out,omega_op,Nx,Ny);
return errNone;
}
| 21ae2a08b14391378fe3ebec5d8356a01fe6bc4f.cu | #include "jacketSDK.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_functions.h"
#include <iostream>
#define TPB 96
//first do this without the turbulence model...then with.
__global__ void channel2D_VW_PE_MRT_ts(double * fOut, double * fIn, int * inl,
int * onl, int * snl, double * ux_p,
double rho_out, double * omega_op,int Nx, int Ny){
int tid = threadIdx.x+blockIdx.x*blockDim.x;
int nnodes = Nx*Ny;
if(tid<nnodes){
double fi1,fi2,fi3,fi4,fi5,fi6,fi7,fi8,fi9;
double fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9;
double fo1,fo2,fo3,fo4,fo5,fo6,fo7,fo8,fo9;
__shared__ double omega[9][9];
    //load the particle distribution functions for this lattice point.
fi1=fIn[tid];
fi2=fIn[nnodes+tid];
fi3=fIn[2*nnodes+tid];
fi4=fIn[3*nnodes+tid];
fi5=fIn[4*nnodes+tid];
fi6=fIn[5*nnodes+tid];
fi7=fIn[6*nnodes+tid];
fi8=fIn[7*nnodes+tid];
fi9=fIn[8*nnodes+tid];
//load portion of omega into shared memory
if(threadIdx.x<81){
int col=threadIdx.x/9;
int row = threadIdx.x-col*9;
omega[row][col]=*(omega_op+9*col+row);
}
//compute rho
double rho = fi1+fi2+fi3+fi4+fi5+fi6+fi7+fi8+fi9;
//compute velocity
double ux = (1/rho)*(fi2+fi6+fi9 - (fi7+fi4+fi8));
double uy = (1/rho)*(fi6+fi3+fi7 - (fi8+fi5+fi9));
//if tid is an inlet node, set inlet Macroscopic and microscopic BC
if(inl[tid]==1){
ux = ux_p[tid];
uy = 0.0;
rho = (1./(1-ux))*(fi1+fi3+fi5+2.0*(fi4+fi7+fi8));
//now set microscopic bc on the inlet
fi2 = fi4+(2./3.)*rho*ux;
fi6=fi8+(0.5)*(fi5-fi3)+(0.5)*rho*uy+(1./6.)*rho*ux;
fi9=fi7+0.5*(fi3-fi5)-0.5*rho*uy+(1./6.)*rho*ux;
}//if(inlet_node_list[tid]==1)...
//if tid is an outlet node, set outlet Macroscopic and microscopic BC
if(onl[tid]==1){
//macroscopic BC
rho = rho_out;
ux = -1. + (1./rho)*(fi1+fi3+fi5+2.0*(fi2+fi6+fi9));
uy = 0.;
//microscopic BC
fi4 = fi2-(2./3.)*rho*ux;
fi8=fi6+0.5*(fi3-fi5)+0.5*rho*uy-(1./6.)*rho*ux;
fi7 = fi9+0.5*(fi5-fi3)+0.5*rho*uy-(1./6.)*rho*ux;
}//if(outlet_node_list[tid]==1)...
//everybody compute fEq
//speed 1
double w = 4./9.;
double cu = 0.;
fe1 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 2
w = 1./9.;
cu = 3.0*ux;
fe2 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 3
cu = 3.0*uy;
fe3 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 4
cu = -3.0*ux;
fe4 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 5
cu = -3.0*uy;
fe5 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 6
w = 1./36.;
cu = 3.0*(ux+uy);
fe6 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 7
cu = 3.0*(-ux+uy);
fe7 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 8
cu = 3.0*(-ux-uy);
fe8 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//speed 9
cu= 3.0*(ux-uy);
fe9 = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
//really...I need fe to equal the non-equilibrium part...
fe1=fi1-fe1;
fe2=fi2-fe2;
fe3=fi3-fe3;
fe4=fi4-fe4;
fe5=fi5-fe5;
fe6=fi6-fe6;
fe7=fi7-fe7;
fe8=fi8-fe8;
fe9=fi9-fe9;
//MRT relaxation
__syncthreads();//make sure omega is loaded...
fo1=fi1-(fe1*omega[0][0]+fe2*omega[1][0]+fe3*omega[2][0]+fe4*omega[3][0]+fe5*omega[4][0]+fe6*omega[5][0]+fe7*omega[6][0]+fe8*omega[7][0]+fe9*omega[8][0]);
fo2=fi2-(fe1*omega[0][1]+fe2*omega[1][1]+fe3*omega[2][1]+fe4*omega[3][1]+fe5*omega[4][1]+fe6*omega[5][1]+fe7*omega[6][1]+fe8*omega[7][1]+fe9*omega[8][1]);
fo3=fi3-(fe1*omega[0][2]+fe2*omega[1][2]+fe3*omega[2][2]+fe4*omega[3][2]+fe5*omega[4][2]+fe6*omega[5][2]+fe7*omega[6][2]+fe8*omega[7][2]+fe9*omega[8][2]);
fo4=fi4-(fe1*omega[0][3]+fe2*omega[1][3]+fe3*omega[2][3]+fe4*omega[3][3]+fe5*omega[4][3]+fe6*omega[5][3]+fe7*omega[6][3]+fe8*omega[7][3]+fe9*omega[8][3]);
fo5=fi5-(fe1*omega[0][4]+fe2*omega[1][4]+fe3*omega[2][4]+fe4*omega[3][4]+fe5*omega[4][4]+fe6*omega[5][4]+fe7*omega[6][4]+fe8*omega[7][4]+fe9*omega[8][4]);
fo6=fi6-(fe1*omega[0][5]+fe2*omega[1][5]+fe3*omega[2][5]+fe4*omega[3][5]+fe5*omega[4][5]+fe6*omega[5][5]+fe7*omega[6][5]+fe8*omega[7][5]+fe9*omega[8][5]);
fo7=fi7-(fe1*omega[0][6]+fe2*omega[1][6]+fe3*omega[2][6]+fe4*omega[3][6]+fe5*omega[4][6]+fe6*omega[5][6]+fe7*omega[6][6]+fe8*omega[7][6]+fe9*omega[8][6]);
fo8=fi8-(fe1*omega[0][7]+fe2*omega[1][7]+fe3*omega[2][7]+fe4*omega[3][7]+fe5*omega[4][7]+fe6*omega[5][7]+fe7*omega[6][7]+fe8*omega[7][7]+fe9*omega[8][7]);
fo9=fi9-(fe1*omega[0][8]+fe2*omega[1][8]+fe3*omega[2][8]+fe4*omega[3][8]+fe5*omega[4][8]+fe6*omega[5][8]+fe7*omega[6][8]+fe8*omega[7][8]+fe9*omega[8][8]);
//bounce-back nodes do this instead...
if(snl[tid]==1){
fo1=fi1;
fo2=fi4; fo4=fi2;
fo3=fi5; fo5=fi3;
fo6=fi8; fo8=fi6;
fo7=fi9; fo9=fi7;
ux = 0.; uy = 0.;
}//if(solid_node_list[tid]==1)...
// stream the result...
//compute the local stream vector...
int x;
int y;
int yn;
int ys;
int xe;
int xw;
//int dir;
int dof_num; //int f_num;
x = tid%Nx+1;
y = ((tid+1)-x+1)/Nx + 1;
yn = y%Ny+1;
xe = x%Nx+1;
if(y==1){
ys = Ny;
}else{
ys = y-1;
}
if(x==1){
xw=Nx;
}else{
xw=x-1;
}
dof_num = Nx*(y-1)+x;
fOut[dof_num-1]=fo1;
dof_num=Nx*(y-1)+xe;
fOut[nnodes+dof_num-1]=fo2;
dof_num=Nx*(yn-1)+x;
fOut[2*nnodes+dof_num-1]=fo3;
dof_num=Nx*(y-1)+xw;
fOut[3*nnodes+dof_num-1]=fo4;
dof_num=Nx*(ys-1)+x;
fOut[4*nnodes+dof_num-1]=fo5;
dof_num=Nx*(yn-1)+xe;
fOut[5*nnodes+dof_num-1]=fo6;
dof_num=Nx*(yn-1)+xw;
fOut[6*nnodes+dof_num-1]=fo7;
dof_num=Nx*(ys-1)+xw;
fOut[7*nnodes+dof_num-1]=fo8;
dof_num=Nx*(ys-1)+xe;
fOut[8*nnodes+dof_num-1]=fo9;
}
}
err_t jktFunction(int nlhs, mxArray * plhs[], int nrhs, mxArray * prhs[]){
if(nrhs!=10)
return err("Usage: channel2D_VW_PE_MRT_DPts(fOut,fIn,inl,onl,snl,ux_p,rho_out,omega_op,Nx,Ny");
mxArray * m_fOut = prhs[0];
mxArray * m_fIn = prhs[1];
mxArray * m_inl = prhs[2];
mxArray * m_onl = prhs[3];
mxArray * m_snl = prhs[4];
mxArray * m_ux_p = prhs[5];
double rho_out = mxGetScalar(prhs[6]);
mxArray * m_omega_op = prhs[7];
int Nx = mxGetScalar(prhs[8]);
int Ny = mxGetScalar(prhs[9]);
int nnodes = Nx*Ny;
double * fOut; double * fIn; int * inl; int * onl; int * snl;
double * ux_p;
double * omega_op;
jkt_mem((void**)&fOut,m_fOut);
jkt_mem((void**)&fIn,m_fIn);
jkt_mem((void**)&inl,m_inl);
jkt_mem((void**)&onl,m_onl);
jkt_mem((void**)&snl,m_snl);
jkt_mem((void**)&ux_p,m_ux_p);
jkt_mem((void**)&omega_op,m_omega_op);
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((nnodes+TPB-1)/TPB,1,1);
channel2D_VW_PE_MRT_ts<<<GRIDS,BLOCKS>>>(fOut,fIn,inl,onl,snl,ux_p,rho_out,omega_op,Nx,Ny);
return errNone;
}
|
5fd84cd23d5a0db10c229b171bc53bc61a8fdd5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "local_contrast_subtractive_2d_layer_hessian_cuda.h"
#include "../local_contrast_subtractive_layer.h"
#include "util_cuda.h"
__global__ void local_contrast_subtractive_2d_blur_horizontal_hess_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_width,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_width; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
template<int WINDOW_WIDTH>
__global__ void local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_hess_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_height,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_height; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
template<int WINDOW_HEIGHT>
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_HEIGHT; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
__global__ void local_contrast_subtractive_2d_copy_unaffected_hess_kernel(
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict unaffected_feature_map_list,
int input_feature_map_count,
int unaffected_feature_map_count,
int elem_count_per_fature_map,
int entry_count)
{
int elem_id = blockIdx.x * blockDim.x + threadIdx.x;
int unaffected_feature_map_index = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (elem_id < elem_count_per_fature_map) && (unaffected_feature_map_index < unaffected_feature_map_count) && (entry_id < entry_count);
if (in_bounds)
{
int unaffected_feature_map_id = unaffected_feature_map_list[unaffected_feature_map_index];
int offset = (entry_id * input_feature_map_count + unaffected_feature_map_id) * elem_count_per_fature_map + elem_id;
output[offset] = original_input[offset];
}
}
__global__ void local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_hess_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights_squared,
float central_mult,
int input_feature_map_count,
int affected_feature_map_count,
int window_height,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights_squared;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_height; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = output[offset] * central_mult + res;
}
}
template<int WINDOW_HEIGHT>
__global__ void local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights_squared,
float central_mult,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights_squared;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_HEIGHT; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = output[offset] * central_mult + res;
}
}
namespace nnforge
{
namespace cuda
{
local_contrast_subtractive_2d_layer_hessian_cuda::local_contrast_subtractive_2d_layer_hessian_cuda()
{
}
local_contrast_subtractive_2d_layer_hessian_cuda::~local_contrast_subtractive_2d_layer_hessian_cuda()
{
}
void local_contrast_subtractive_2d_layer_hessian_cuda::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[0])
{
case 1:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<1>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<2>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<3>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<4>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<5>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<6>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<7>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<8>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<9>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<10>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_hess_kernel), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id,
*input_neurons_buffer,
*additional_buffers[0],
*schema_data[0],
*schema_data[1],
input_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[0],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[1])
{
case 1:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<1>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<2>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<3>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<4>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<5>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<6>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<7>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<8>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<9>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<10>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_hess_kernel), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id,
*additional_buffers[0],
*input_neurons_buffer,
*output_neurons_buffer,
*schema_data[0],
*schema_data[2],
input_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[1],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
if (unaffected_feature_map_count > 0)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_elem_count_per_feature_map,
unaffected_feature_map_count,
entry_count);
hipLaunchKernelGGL(( local_contrast_subtractive_2d_copy_unaffected_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_neurons_buffer,
*output_neurons_buffer,
*schema_data[5],
input_configuration_specific.feature_map_count,
unaffected_feature_map_count,
input_elem_count_per_feature_map,
entry_count);
}
}
void local_contrast_subtractive_2d_layer_hessian_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[0])
{
case 1:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<1>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<2>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<3>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<4>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<5>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<6>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<7>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<8>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<9>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<10>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_hess_kernel), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id,
*output_errors_buffer,
*additional_buffers[0],
*schema_data[0],
*schema_data[3],
input_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[0],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[1])
{
case 1:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<1>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<2>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<3>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<4>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<5>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<6>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<7>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<8>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<9>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<10>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_hess_kernel), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id,
*additional_buffers[0],
*output_errors_buffer,
*schema_data[0],
*schema_data[4],
central_mult,
input_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[1],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
}
void local_contrast_subtractive_2d_layer_hessian_cuda::hessian_configured()
{
std::tr1::shared_ptr<const local_contrast_subtractive_layer> layer_derived = std::tr1::dynamic_pointer_cast<const local_contrast_subtractive_layer>(layer_schema);
affected_feature_map_count = static_cast<int>(layer_derived->feature_maps_affected.size());
unaffected_feature_map_count = static_cast<int>(layer_derived->feature_maps_unaffected.size());
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
half_window_sizes.push_back(static_cast<int>((*it + 1) >> 1));
central_mult = 1.0F - (2.0F * layer_derived->window_weights_list[0][0] * layer_derived->window_weights_list[1][0]);
}
std::vector<size_t> local_contrast_subtractive_2d_layer_hessian_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_feature_map * affected_feature_map_count * sizeof(float));
return res;
}
bool local_contrast_subtractive_2d_layer_hessian_cuda::is_in_place_backprop() const
{
return true;
}
}
}
| 5fd84cd23d5a0db10c229b171bc53bc61a8fdd5d.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "local_contrast_subtractive_2d_layer_hessian_cuda.h"
#include "../local_contrast_subtractive_layer.h"
#include "util_cuda.h"
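// Forward pass outline: a horizontal 1D blur kernel writes into a temporary buffer, then a vertical
// 1D blur-and-subtract kernel produces output = input - blur(input) for every affected feature map.
// Window half-sizes 1..10 dispatch to templated "exact" kernels whose inner loops unroll fully;
// larger windows fall back to the generic kernels that take the window size as a runtime argument.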
__global__ void local_contrast_subtractive_2d_blur_horizontal_hess_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_width,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
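// The low/high pointers step outwards from the centre sample; near the image border the index tests
// reverse the step direction, which effectively mirrors the data instead of reading out of bounds.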
#pragma unroll 4
for(int i = 1; i < window_width; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
template<int WINDOW_WIDTH>
__global__ void local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_hess_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_height,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_height; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
template<int WINDOW_HEIGHT>
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_HEIGHT; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
__global__ void local_contrast_subtractive_2d_copy_unaffected_hess_kernel(
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict unaffected_feature_map_list,
int input_feature_map_count,
int unaffected_feature_map_count,
int elem_count_per_fature_map,
int entry_count)
{
int elem_id = blockIdx.x * blockDim.x + threadIdx.x;
int unaffected_feature_map_index = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (elem_id < elem_count_per_fature_map) && (unaffected_feature_map_index < unaffected_feature_map_count) && (entry_id < entry_count);
if (in_bounds)
{
int unaffected_feature_map_id = unaffected_feature_map_list[unaffected_feature_map_index];
int offset = (entry_id * input_feature_map_count + unaffected_feature_map_id) * elem_count_per_fature_map + elem_id;
output[offset] = original_input[offset];
}
}
__global__ void local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_hess_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights_squared,
float central_mult,
int input_feature_map_count,
int affected_feature_map_count,
int window_height,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights_squared;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_height; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = output[offset] * central_mult + res;
}
}
template<int WINDOW_HEIGHT>
__global__ void local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights_squared,
float central_mult,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights_squared;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_HEIGHT; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = output[offset] * central_mult + res;
}
}
namespace nnforge
{
namespace cuda
{
local_contrast_subtractive_2d_layer_hessian_cuda::local_contrast_subtractive_2d_layer_hessian_cuda()
{
}
local_contrast_subtractive_2d_layer_hessian_cuda::~local_contrast_subtractive_2d_layer_hessian_cuda()
{
}
void local_contrast_subtractive_2d_layer_hessian_cuda::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
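// Dispatch on the half window size: values 1..10 pick a fully unrolled template specialization,
// anything larger uses the generic kernel with a runtime loop bound.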
switch(half_window_sizes[0])
{
case 1:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<1><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<2><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<3><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<4><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<5><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<6><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<7><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<8><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<9><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<10><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_neurons_buffer, *additional_buffers[0], *schema_data[0], *schema_data[1], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
local_contrast_subtractive_2d_blur_horizontal_hess_kernel<<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(
*input_neurons_buffer,
*additional_buffers[0],
*schema_data[0],
*schema_data[1],
input_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[0],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[1])
{
case 1:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<1><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<2><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<3><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<4><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<5><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<6><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<7><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<8><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<9><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_hess_kernel<10><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], *schema_data[2], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
local_contrast_subtractive_2d_blur_vertical_and_subtract_hess_kernel<<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(
*additional_buffers[0],
*input_neurons_buffer,
*output_neurons_buffer,
*schema_data[0],
*schema_data[2],
input_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[1],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
if (unaffected_feature_map_count > 0)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_elem_count_per_feature_map,
unaffected_feature_map_count,
entry_count);
local_contrast_subtractive_2d_copy_unaffected_hess_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_neurons_buffer,
*output_neurons_buffer,
*schema_data[5],
input_configuration_specific.feature_map_count,
unaffected_feature_map_count,
input_elem_count_per_feature_map,
entry_count);
}
}
void local_contrast_subtractive_2d_layer_hessian_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[0])
{
case 1:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<1><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<2><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<3><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<4><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<5><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<6><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<7><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<8><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<9><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
local_contrast_subtractive_2d_blur_horizontal_exact_hess_kernel<10><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*output_errors_buffer, *additional_buffers[0], *schema_data[0], *schema_data[3], input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
local_contrast_subtractive_2d_blur_horizontal_hess_kernel<<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(
*output_errors_buffer,
*additional_buffers[0],
*schema_data[0],
*schema_data[3],
input_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[0],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[1])
{
case 1:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<1><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<2><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<3><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<4><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<5><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<6><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<7><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<8><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<9><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_exact_hess_kernel<10><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*additional_buffers[0], *output_errors_buffer, *schema_data[0], *schema_data[4], central_mult, input_configuration_specific.feature_map_count, affected_feature_map_count, input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
local_contrast_subtractive_2d_square_deriviative_blur_vertical_and_add_hess_kernel<<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(
*additional_buffers[0],
*output_errors_buffer,
*schema_data[0],
*schema_data[4],
central_mult,
input_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[1],
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
}
void local_contrast_subtractive_2d_layer_hessian_cuda::hessian_configured()
{
std::tr1::shared_ptr<const local_contrast_subtractive_layer> layer_derived = std::tr1::dynamic_pointer_cast<const local_contrast_subtractive_layer>(layer_schema);
affected_feature_map_count = static_cast<int>(layer_derived->feature_maps_affected.size());
unaffected_feature_map_count = static_cast<int>(layer_derived->feature_maps_unaffected.size());
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
half_window_sizes.push_back(static_cast<int>((*it + 1) >> 1));
central_mult = 1.0F - (2.0F * layer_derived->window_weights_list[0][0] * layer_derived->window_weights_list[1][0]);
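// Interpretation: for y = x - blur(x) the derivative w.r.t. the central sample is (1 - c) with
// c = w_h[0] * w_v[0]; its square is 1 - 2c + c^2. central_mult supplies the (1 - 2c) part, and the
// c^2 contribution appears to be folded into the squared-weights blur applied in enqueue_backprop.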
}
std::vector<size_t> local_contrast_subtractive_2d_layer_hessian_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_feature_map * affected_feature_map_count * sizeof(float));
return res;
}
bool local_contrast_subtractive_2d_layer_hessian_cuda::is_in_place_backprop() const
{
return true;
}
}
}
|
5a66813cbfbeb91060cab1754c48a73f2723a213.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <hipblas.h>
#include <cusolverDn.h>
#include "../cublasHelper.h"
#include <exceptions/cuda_exception.h>
#include <helpers/logger.h>
#include <execution/AffinityManager.h>
#include "config.h"
#ifdef HAVE_CUDNN
#include <cudnn.h>
#endif
namespace sd {
std::mutex CublasHelper::_mutex;
static void* handle_() {
auto _handle = new hipblasHandle_t();
auto status = hipblasCreate(_handle); // initialize CUBLAS context
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("cuBLAS handle creation failed !", status);
return reinterpret_cast<void *>(_handle);
}
static void* solver_() {
auto cusolverH = new hipsolverDnHandle_t();
auto status = hipsolverDnCreate(cusolverH);
if (status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("cuSolver handle creation failed !", status);
return cusolverH;
}
static void* cudnn_() {
#ifdef HAVE_CUDNN
auto cudnnH = new cudnnHandle_t();
auto status = cudnnCreate(cudnnH);
if (status != CUDNN_STATUS_SUCCESS)
throw cuda_exception::build("cuDNN handle creation failed !", status);
return cudnnH;
#endif
return nullptr;
}
static void destroyHandle_(void* handle) {
auto ch = reinterpret_cast<hipblasHandle_t *>(handle);
auto status = hipblasDestroy(*ch);
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("cuBLAS handle destruction failed !", status);
delete ch;
}
CublasHelper::CublasHelper() {
//nd4j_printf("Initializing cuBLAS\n","");
auto numDevices = AffinityManager::numberOfDevices();
auto currentDevice = AffinityManager::currentDeviceId();
_cache.resize(numDevices);
_solvers.resize(numDevices);
_cudnn.resize(numDevices);
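// One handle of each kind is created per device; a handle is bound to the device that is current
// when it is created, hence the setCurrentNativeDevice() call inside the loop.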
for (int e = 0; e < numDevices; e++) {
AffinityManager::setCurrentNativeDevice(e);
_cache[e] = handle_();
_solvers[e] = solver_();
_cudnn[e] = cudnn_();
}
// don't forget to restore the original device
AffinityManager::setCurrentNativeDevice(currentDevice);
}
CublasHelper::~CublasHelper() {
auto numDevices = AffinityManager::numberOfDevices();
for (int e = 0; e < numDevices; e++)
destroyHandle_(_cache[e]);
}
CublasHelper& CublasHelper::getInstance() {
static CublasHelper instance;
return instance;
}
void* CublasHelper::cudnn() {
auto deviceId = AffinityManager::currentDeviceId();
if (deviceId < 0 || deviceId > _cudnn.size())
throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
return _cudnn[deviceId];
}
void* CublasHelper::handle() {
auto deviceId = AffinityManager::currentDeviceId();
return handle(deviceId);
}
void* CublasHelper::solver() {
auto deviceId = AffinityManager::currentDeviceId();
if (deviceId < 0 || deviceId > _solvers.size())
throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
return _solvers[deviceId];
}
void* CublasHelper::handle(int deviceId) {
if (deviceId < 0 || deviceId > _cache.size())
throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
return _cache[deviceId];
}
} | 5a66813cbfbeb91060cab1754c48a73f2723a213.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "../cublasHelper.h"
#include <exceptions/cuda_exception.h>
#include <helpers/logger.h>
#include <execution/AffinityManager.h>
#include "config.h"
#ifdef HAVE_CUDNN
#include <cudnn.h>
#endif
namespace sd {
std::mutex CublasHelper::_mutex;
static void* handle_() {
auto _handle = new cublasHandle_t();
auto status = cublasCreate_v2(_handle); // initialize CUBLAS context
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("cuBLAS handle creation failed !", status);
return reinterpret_cast<void *>(_handle);
}
static void* solver_() {
auto cusolverH = new cusolverDnHandle_t();
auto status = cusolverDnCreate(cusolverH);
if (status != CUSOLVER_STATUS_SUCCESS)
throw cuda_exception::build("cuSolver handle creation failed !", status);
return cusolverH;
}
static void* cudnn_() {
#ifdef HAVE_CUDNN
auto cudnnH = new cudnnHandle_t();
auto status = cudnnCreate(cudnnH);
if (status != CUDNN_STATUS_SUCCESS)
throw cuda_exception::build("cuDNN handle creation failed !", status);
return cudnnH;
#endif
return nullptr;
}
static void destroyHandle_(void* handle) {
auto ch = reinterpret_cast<cublasHandle_t *>(handle);
auto status = cublasDestroy_v2(*ch);
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("cuBLAS handle destruction failed !", status);
delete ch;
}
CublasHelper::CublasHelper() {
//nd4j_printf("Initializing cuBLAS\n","");
auto numDevices = AffinityManager::numberOfDevices();
auto currentDevice = AffinityManager::currentDeviceId();
_cache.resize(numDevices);
_solvers.resize(numDevices);
_cudnn.resize(numDevices);
for (int e = 0; e < numDevices; e++) {
AffinityManager::setCurrentNativeDevice(e);
_cache[e] = handle_();
_solvers[e] = solver_();
_cudnn[e] = cudnn_();
}
// don't forget to restore the original device
AffinityManager::setCurrentNativeDevice(currentDevice);
}
CublasHelper::~CublasHelper() {
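// Only the cuBLAS handles are destroyed here; the solver and cuDNN handles created in the
// constructor are left alive until process exit.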
auto numDevices = AffinityManager::numberOfDevices();
for (int e = 0; e < numDevices; e++)
destroyHandle_(_cache[e]);
}
CublasHelper& CublasHelper::getInstance() {
static CublasHelper instance;
return instance;
}
void* CublasHelper::cudnn() {
auto deviceId = AffinityManager::currentDeviceId();
if (deviceId < 0 || deviceId > _cudnn.size())
throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
return _cudnn[deviceId];
}
void* CublasHelper::handle() {
auto deviceId = AffinityManager::currentDeviceId();
return handle(deviceId);
}
void* CublasHelper::solver() {
auto deviceId = AffinityManager::currentDeviceId();
if (deviceId < 0 || deviceId > _solvers.size())
throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
return _solvers[deviceId];
}
void* CublasHelper::handle(int deviceId) {
if (deviceId < 0 || deviceId > _cache.size())
throw cuda_exception::build("requested deviceId doesn't look valid", deviceId);
return _cache[deviceId];
}
} |
8f0213db28ff047e44f664735faea4755a69c935.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
*
* Created on: 27.6.2011
* Author: Teemu Rantalaiho ([email protected])
*
*
* Copyright 2011 Teemu Rantalaiho
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* Compile with:
*
* nvcc -O4 -arch=<your_arch> -I../ test_mem_multi16.cu -o test_mem_multi16
*
*
*/
#define TESTMAXIDX 256 // default 256 keys / indices (change this)
#define TEST_IS_POW2 1 // Note: this tells whether TESTMAXIDX is a power-of-two number
#define TEST_SIZE (625 * 100 * 1000 ) // 62.5 million inputs -> 1000 million keys
#define NRUNS 100 // Repeat 100 times => 100 billion keys histogrammed in total (16 keys per entry)
#define START_INDEX 0
#define NSTRESS_RUNS NRUNS
#define ENABLE_THRUST 0 // Enable thrust-based version also (xform-sort_by_key-reduce_by_key)
#include "cuda_histogram.h"
#if ENABLE_THRUST
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/sort.h>
#endif
#include <assert.h>
#include <stdio.h>
// Always return 1 -> normal histogram - each sample has the same weight
struct test_xform2
{
__host__ __device__
void operator() (uint4* input, int i, int* result_index, int* results, int nresults) const {
uint4 idata = input[i];
#pragma unroll
for (int resIdx = 0; resIdx < 4; resIdx++)
{
unsigned int data = ((unsigned int*)(&idata))[resIdx];
#if TESTMAXIDX < 256
#if TEST_IS_POW2
*result_index++ = ((data >> 24) & 0xFF) & (TESTMAXIDX - 1);
*result_index++ = ((data >> 16) & 0xFF) & (TESTMAXIDX - 1);
*result_index++ = ((data >> 8) & 0xFF) & (TESTMAXIDX - 1);
*result_index++ = ((data >> 0) & 0xFF) & (TESTMAXIDX - 1);
#else
*result_index++ = ((data >> 24) & 0xFF) % (TESTMAXIDX);
*result_index++ = ((data >> 16) & 0xFF) % (TESTMAXIDX);
*result_index++ = ((data >> 8) & 0xFF) % (TESTMAXIDX);
*result_index++ = ((data >> 0) & 0xFF) % (TESTMAXIDX);
#endif
#else
*result_index++ = ((data >> 24));
*result_index++ = ((data >> 16) & 0xFF);
*result_index++ = ((data >> 8) & 0xFF);
*result_index++ = ((data >> 0) & 0xFF);
#endif
*results++ = 1;
*results++ = 1;
*results++ = 1;
*results++ = 1;
}
}
};
struct test_sumfun2 {
__device__ __host__
int operator() (int res1, int res2) const{
return res1 + res2;
}
};
static void printres (int* res, int nres, const char* descr)
{
if (descr)
printf("\n%s:\n", descr);
printf("vals = [ ");
for (int i = 0; i < nres; i++)
printf("(%d), ", res[i]);
printf("]\n");
}
static void testHistogramParam(uint4* INPUT, uint4* hostINPUT, int index_0, int index_1, bool print, bool cpurun, bool stress, void* tmpBuffer)
{
int nIndex = TESTMAXIDX;
int srun;
int nruns = stress ? NSTRESS_RUNS : 1;
test_sumfun2 sumFun;
test_xform2 transformFun;
//test_indexfun2 indexFun;
int* tmpres = (int*)malloc(sizeof(int) * nIndex);
int* cpures = stress ? (int*)malloc(sizeof(int) * nIndex) : tmpres;
int zero = 0;
for (srun = 0; srun < nruns; srun++)
{
{
int* tmpidx = (int*)malloc(sizeof(int) * nIndex);
if (print)
printf("\nTest reduce_by_key:\n\n");
memset(tmpres, 0, sizeof(int) * nIndex);
if (stress)
memset(cpures, 0, sizeof(int) * nIndex);
if (cpurun || stress)
for (int i = index_0; i < index_1; i++)
{
int index[16];
int tmp[16];
transformFun(hostINPUT, i, &index[0], &tmp[0], 1);
//index = indexFun(INPUT, i);
for (int tmpi = 0; tmpi < 16; tmpi++)
cpures[index[tmpi]] = sumFun(cpures[index[tmpi]], tmp[tmpi]);
//printf("i = %d, out_index = %d, out_val = (%.3f, %.3f) \n",i, index, tmp.real, tmp.imag);
}
if (print && cpurun)
{
printres(cpures, nIndex, "CPU results:");
}
free(tmpidx); // tmpidx is never used; release it so the stress loop does not leak
}
if (!cpurun)
callHistogramKernel<histogram_atomic_inc, 16>(INPUT, transformFun, /*indexFun,*/ sumFun, index_0, index_1, zero, tmpres, nIndex, false, 0, tmpBuffer);
if (stress)
{
int k;
for (k = 0; k < nIndex; k++)
{
if (tmpres[k] != cpures[k] /*|| tmpres[k].imag != cpures[k].imag*/)
{
printf("Error detected with index-values: i0 = %d, i1 = %d!\n", index_0, index_1);
printres(cpures, nIndex, "CPU results:");
printres(tmpres, nIndex, "GPU results:");
}
}
}
if (print && (!cpurun))
{
printres(tmpres, nIndex, "GPU results:");
}
int size = index_1 - index_0;
index_0 += 1;
index_1 -= 1;
if (index_0 > index_1 + 1)
{
int tmp = index_0;
index_0 = index_1;
index_1 = tmp;
}
if (index_0 < 0 || index_1 < 0) {
index_0 = 0;
index_1 = size - 1;
}
}
free(tmpres);
if (stress)
free(cpures);
}
#if ENABLE_THRUST
// NOTE: Take advantage here of the fact that this is the classical histogram with all values = 1
// And also that we know before hand the number of indices coming out
static void testHistogramParamThrust(int* INPUT, int index_0, int index_1, bool print)
{
test_sumfun2 mysumfun;
thrust::equal_to<int> binary_pred;
int nIndex = TESTMAXIDX;
int N = index_1 - index_0;
thrust::device_vector<int> keys_out(nIndex);
thrust::device_vector<int> vals_out(nIndex);
thrust::device_vector<int> h_vals_out(nIndex);
//thrust::device_vector<int> keys(N);
thrust::device_ptr<int> keys(INPUT);
// Sort the data
thrust::sort(keys, keys + N);
// And reduce by key - histogram complete
thrust::reduce_by_key(keys, keys + N, thrust::make_constant_iterator(1), keys_out.begin(), vals_out.begin(), binary_pred, mysumfun);
h_vals_out = vals_out;
if (print)
{
printf("\nThrust results:\n");
printf("vals = [ ");
for (int i = 0; i < nIndex; i++)
{
int tmp = h_vals_out[i];
printf("(%d), ", tmp);
}
printf("]\n");
}
}
#endif
void printUsage(void)
{
printf("\n");
printf("Test order independent reduce-by-key / histogram algorithm\n\n");
printf("By default this runs on custom algorithm on the GPU, with lots of equal consecutive keys\n\n");
printf("\tOptions:\n\n");
printf("\t\t--cpu\t\t Run on CPU serially instead of GPU\n");
printf("\t\t--print\t\t Print results of algorithm (check validity)\n");
printf("\t\t--thrust\t Run on GPU but using thrust library\n");
printf("\t\t--load\t Use 32-bit texture data s\n");
printf("\t\t--rnd\t Take uniform random keys s\n");
// printf("\t\t--sharp\t Make peaks sharp\n");
// printf("\t\t--nornd\t Remove random noise from input\n");
}
static
unsigned int* MyTexture_load(char* filename, int* dataSize)
{
FILE* file = fopen(filename, "rb");
//texture->dataRGBA8888 = NULL;
if (!file)
{
char* tmp = (char*)malloc(strlen(filename) + 10);
if (tmp)
{
char* ptr = tmp;
strcpy(ptr, "../");
ptr += 3;
strcpy(ptr, filename);
file = fopen(tmp, "rb");
}
}
// Read
if (file)
{
int npixels = 512 * 512;//texture->width * texture->height;
int size = npixels * 4;
unsigned int* data = (unsigned int*)malloc(size);
*dataSize = npixels;
if (data)
{
int i;
for (i = 0; i < npixels; i++)
{
unsigned int r, g, b;
unsigned int raw = 0;
unsigned int pixel = 0;
int rsize = fread(&raw, 3, 1, file);
if (rsize != 1)
{
printf(
"Warning: Unexpected EOF in texture %s at idx %d\n",
filename, i);
break;
}
r = (raw & 0x00FF0000) >> 16;
g = (raw & 0x0000FF00) >> 8;
b = (raw & 0x000000FF) >> 0;
pixel = 0xFF000000 | (b << 16) | (g << 8) | (r << 0);
data[i] = pixel;
}
}
fclose(file);
return data;
}
return NULL;
}
static inline int getInput(size_t i, unsigned int* texData, int dataSize, bool rnd)
{
if (texData)
{
static size_t index = i % dataSize;
static int round = 0;
unsigned int val = texData[index];
int result;
result = val + round;
index++;
if (index >= dataSize)
{
index = 0;
round += 7;
}
#if TEST_IS_POW2
result = (int)(result);
#else
result = (int)(result);
#endif
return result;
}
else
{
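// No texture data: keys come from a linear congruential generator
// (Numerical Recipes constants); without --rnd the value is overridden by
// i / 100, which yields long runs of equal consecutive keys.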
static unsigned int current = 0xf1232345;
const unsigned int mult = 1664525;
const unsigned int add = 1013904223ul;
//int peakWidth = sharp ? 100 : 10000;
// static int nextPeak = 200;
// static int currentBase = 0;
// static int currentWidth = TESTMAXIDX;
current = current * mult + add;
if (!rnd)
current = i / 100;
#if TEST_IS_POW2
i = (int)(current);
#else
i = (int)(current);
#endif
return i;
}
}
static void fillInput(int* input, bool load, bool rnd)
{
size_t i;
unsigned int* texData = NULL;
int dataSize = 0;
if (load && !rnd)
{
texData = MyTexture_load("texture.raw", &dataSize);
}
for (i = 0; i < TEST_SIZE * 4;)
{
*input++ = getInput(i++, texData, dataSize, rnd);
*input++ = getInput(i++, texData, dataSize, rnd);
*input++ = getInput(i++, texData, dataSize, rnd);
*input++ = getInput(i++, texData, dataSize, rnd);
}
if (texData) free(texData);
}
int main (int argc, char** argv)
{
int i;
int index_0 = START_INDEX;
int index_1 = index_0 + TEST_SIZE;
bool cpu = false;
bool print = false;
bool thrust = false;
bool stress = false;
// bool peaks = false;
// bool sharp = false;
bool rnd = false;
bool load = false;
printUsage();
for (i = 0; i < argc; i++)
{
if (argv[i] && strcmp(argv[i], "--cpu") == 0)
cpu = true;
if (argv[i] && strcmp(argv[i], "--print") == 0)
print = true;
if (argv[i] && strcmp(argv[i], "--thrust") == 0)
thrust = true;
if (argv[i] && strcmp(argv[i], "--stress") == 0)
stress = true;
// if (argv[i] && strcmp(argv[i], "--peaks") == 0)
// peaks = true;
if (argv[i] && strcmp(argv[i], "--load") == 0)
load = true;
// if (argv[i] && strcmp(argv[i], "--sharp") == 0)
// sharp = true;
if (argv[i] && strcmp(argv[i], "--rnd") == 0)
rnd = true;
}
{
// Allocate keys:
int* INPUT = NULL;
int* hostINPUT = (int*)malloc(4 * sizeof(int) * (TEST_SIZE + 3));
assert(hostINPUT);
fillInput(hostINPUT, load, rnd);
if (!cpu)
{
hipMalloc(&INPUT, 4 * sizeof(int) * TEST_SIZE);
assert(INPUT);
hipMemcpy(INPUT, hostINPUT, 4 * sizeof(int) * TEST_SIZE, hipMemcpyHostToDevice);
}
void* tmpBuffer = NULL;
int zero = 0;
int tmpbufsize = getHistogramBufSize<histogram_atomic_inc>(zero , (int)TESTMAXIDX);
hipMalloc(&tmpBuffer, tmpbufsize);
// Create events for timing:
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Now start timer - we run on stream 0 (default stream):
hipEventRecord(start, 0);
for (i = 0; i < NRUNS; i++)
{
if (thrust)
{
#if ENABLE_THRUST
testHistogramParamThrust(INPUT, index_0, index_1, print);
#else
printf("\nTest was compiled without thrust support! Find 'ENABLE_THRUST' in source-code!\n\n Exiting...\n");
break;
#endif
}
else
{
testHistogramParam((uint4*)INPUT, (uint4*)hostINPUT, index_0, index_1, print, cpu, stress, tmpBuffer);
}
print = false;
// Run only once all stress-tests
if (stress) break;
}
{
float t_ms;
hipEventRecord(stop, 0);
hipDeviceSynchronize();
hipEventElapsedTime(&t_ms, start, stop);
double t = t_ms * 0.001f;
double GKps = (((double)TEST_SIZE * (double)NRUNS * 16.0)) / (t*1.e9);
printf("Runtime in loops: %fs, Throughput (Gkeys/s): %3f GK/s \n", t, GKps);
}
if (INPUT) hipFree(INPUT);
if (hostINPUT) free(hostINPUT);
if (tmpBuffer) hipFree(tmpBuffer);
hipEventDestroy(start);
hipEventDestroy(stop);
}
return 0;
}
| 8f0213db28ff047e44f664735faea4755a69c935.cu | /*
*
*
* Created on: 27.6.2011
* Author: Teemu Rantalaiho ([email protected])
*
*
* Copyright 2011 Teemu Rantalaiho
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* Compile with:
*
* nvcc -O4 -arch=<your_arch> -I../ test_mem_multi16.cu -o test_mem_multi16
*
*
*/
#define TESTMAXIDX 256 // default 256 keys (change this) / indices
#define TEST_IS_POW2 1 // Note: This tells whether TESTMAXIDX is a power-of-two number
#define TEST_SIZE (625 * 100 * 1000 ) // 62.5 million inputs -> 1000 million keys
#define NRUNS 100 // Repeat 100 times => 100 Gigakeys in total (16 keys per entry)
#define START_INDEX 0
#define NSTRESS_RUNS NRUNS
#define ENABLE_THRUST 0 // Enable thrust-based version also (xform-sort_by_key-reduce_by_key)
#include "cuda_histogram.h"
#if ENABLE_THRUST
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/sort.h>
#endif
#include <assert.h>
#include <stdio.h>
// Always return 1 -> normal histogram - each sample has same weight
struct test_xform2
{
__host__ __device__
void operator() (uint4* input, int i, int* result_index, int* results, int nresults) const {
uint4 idata = input[i];
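// Each uint4 carries 16 byte-sized keys: the loop below splits every 32-bit
// lane into four 8-bit indices and emits a weight of 1 for each of them.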
#pragma unroll
for (int resIdx = 0; resIdx < 4; resIdx++)
{
unsigned int data = ((unsigned int*)(&idata))[resIdx];
#if TESTMAXIDX < 256
#if TEST_IS_POW2
*result_index++ = ((data >> 24) & 0xFF) & (TESTMAXIDX - 1);
*result_index++ = ((data >> 16) & 0xFF) & (TESTMAXIDX - 1);
*result_index++ = ((data >> 8) & 0xFF) & (TESTMAXIDX - 1);
*result_index++ = ((data >> 0) & 0xFF) & (TESTMAXIDX - 1);
#else
*result_index++ = ((data >> 24) & 0xFF) % (TESTMAXIDX);
*result_index++ = ((data >> 16) & 0xFF) % (TESTMAXIDX);
*result_index++ = ((data >> 8) & 0xFF) % (TESTMAXIDX);
*result_index++ = ((data >> 0) & 0xFF) % (TESTMAXIDX);
#endif
#else
*result_index++ = ((data >> 24));
*result_index++ = ((data >> 16) & 0xFF);
*result_index++ = ((data >> 8) & 0xFF);
*result_index++ = ((data >> 0) & 0xFF);
#endif
*results++ = 1;
*results++ = 1;
*results++ = 1;
*results++ = 1;
}
}
};
struct test_sumfun2 {
__device__ __host__
int operator() (int res1, int res2) const{
return res1 + res2;
}
};
static void printres (int* res, int nres, const char* descr)
{
if (descr)
printf("\n%s:\n", descr);
printf("vals = [ ");
for (int i = 0; i < nres; i++)
printf("(%d), ", res[i]);
printf("]\n");
}
static void testHistogramParam(uint4* INPUT, uint4* hostINPUT, int index_0, int index_1, bool print, bool cpurun, bool stress, void* tmpBuffer)
{
int nIndex = TESTMAXIDX;
int srun;
int nruns = stress ? NSTRESS_RUNS : 1;
test_sumfun2 sumFun;
test_xform2 transformFun;
//test_indexfun2 indexFun;
int* tmpres = (int*)malloc(sizeof(int) * nIndex);
int* cpures = stress ? (int*)malloc(sizeof(int) * nIndex) : tmpres;
int zero = 0;
for (srun = 0; srun < nruns; srun++)
{
{
int* tmpidx = (int*)malloc(sizeof(int) * nIndex);
if (print)
printf("\nTest reduce_by_key:\n\n");
memset(tmpres, 0, sizeof(int) * nIndex);
if (stress)
memset(cpures, 0, sizeof(int) * nIndex);
if (cpurun || stress)
for (int i = index_0; i < index_1; i++)
{
int index[16];
int tmp[16];
transformFun(hostINPUT, i, &index[0], &tmp[0], 1);
//index = indexFun(INPUT, i);
for (int tmpi = 0; tmpi < 16; tmpi++)
cpures[index[tmpi]] = sumFun(cpures[index[tmpi]], tmp[tmpi]);
//printf("i = %d, out_index = %d, out_val = (%.3f, %.3f) \n",i, index, tmp.real, tmp.imag);
}
if (print && cpurun)
{
printres(cpures, nIndex, "CPU results:");
}
free(tmpidx); // tmpidx is never used; release it so the stress loop does not leak
}
if (!cpurun)
callHistogramKernel<histogram_atomic_inc, 16>(INPUT, transformFun, /*indexFun,*/ sumFun, index_0, index_1, zero, tmpres, nIndex, false, 0, tmpBuffer);
if (stress)
{
int k;
for (k = 0; k < nIndex; k++)
{
if (tmpres[k] != cpures[k] /*|| tmpres[k].imag != cpures[k].imag*/)
{
printf("Error detected with index-values: i0 = %d, i1 = %d!\n", index_0, index_1);
printres(cpures, nIndex, "CPU results:");
printres(tmpres, nIndex, "GPU results:");
}
}
}
if (print && (!cpurun))
{
printres(tmpres, nIndex, "GPU results:");
}
int size = index_1 - index_0;
index_0 += 1;
index_1 -= 1;
if (index_0 > index_1 + 1)
{
int tmp = index_0;
index_0 = index_1;
index_1 = tmp;
}
if (index_0 < 0 || index_1 < 0) {
index_0 = 0;
index_1 = size - 1;
}
}
free(tmpres);
if (stress)
free(cpures);
}
#if ENABLE_THRUST
// NOTE: Take advantage here of the fact that this is the classical histogram with all values = 1
// And also that we know before hand the number of indices coming out
static void testHistogramParamThrust(int* INPUT, int index_0, int index_1, bool print)
{
test_sumfun2 mysumfun;
thrust::equal_to<int> binary_pred;
int nIndex = TESTMAXIDX;
int N = index_1 - index_0;
thrust::device_vector<int> keys_out(nIndex);
thrust::device_vector<int> vals_out(nIndex);
thrust::device_vector<int> h_vals_out(nIndex);
//thrust::device_vector<int> keys(N);
thrust::device_ptr<int> keys(INPUT);
// Sort the data
thrust::sort(keys, keys + N);
// And reduce by key - histogram complete
thrust::reduce_by_key(keys, keys + N, thrust::make_constant_iterator(1), keys_out.begin(), vals_out.begin(), binary_pred, mysumfun);
h_vals_out = vals_out;
if (print)
{
printf("\nThrust results:\n");
printf("vals = [ ");
for (int i = 0; i < nIndex; i++)
{
int tmp = h_vals_out[i];
printf("(%d), ", tmp);
}
printf("]\n");
}
}
#endif
void printUsage(void)
{
printf("\n");
printf("Test order independent reduce-by-key / histogram algorithm\n\n");
printf("By default this runs on custom algorithm on the GPU, with lots of equal consecutive keys\n\n");
printf("\tOptions:\n\n");
printf("\t\t--cpu\t\t Run on CPU serially instead of GPU\n");
printf("\t\t--print\t\t Print results of algorithm (check validity)\n");
printf("\t\t--thrust\t Run on GPU but using thrust library\n");
printf("\t\t--load\t Use 32-bit texture data s\n");
printf("\t\t--rnd\t Take uniform random keys s\n");
// printf("\t\t--sharp\t Make peaks sharp\n");
// printf("\t\t--nornd\t Remove random noise from input\n");
}
static
unsigned int* MyTexture_load(char* filename, int* dataSize)
{
FILE* file = fopen(filename, "rb");
//texture->dataRGBA8888 = NULL;
if (!file)
{
char* tmp = (char*)malloc(strlen(filename) + 10);
if (tmp)
{
char* ptr = tmp;
strcpy(ptr, "../");
ptr += 3;
strcpy(ptr, filename);
file = fopen(tmp, "rb");
}
}
// Read
if (file)
{
int npixels = 512 * 512;//texture->width * texture->height;
int size = npixels * 4;
unsigned int* data = (unsigned int*)malloc(size);
*dataSize = npixels;
if (data)
{
int i;
for (i = 0; i < npixels; i++)
{
unsigned int r, g, b;
unsigned int raw = 0;
unsigned int pixel = 0;
int rsize = fread(&raw, 3, 1, file);
if (rsize != 1)
{
printf(
"Warning: Unexpected EOF in texture %s at idx %d\n",
filename, i);
break;
}
r = (raw & 0x00FF0000) >> 16;
g = (raw & 0x0000FF00) >> 8;
b = (raw & 0x000000FF) >> 0;
pixel = 0xFF000000 | (b << 16) | (g << 8) | (r << 0);
data[i] = pixel;
}
}
fclose(file);
return data;
}
return NULL;
}
static inline int getInput(size_t i, unsigned int* texData, int dataSize, bool rnd)
{
if (texData)
{
static size_t index = i % dataSize;
static int round = 0;
unsigned int val = texData[index];
int result;
result = val + round;
index++;
if (index >= dataSize)
{
index = 0;
round += 7;
}
#if TEST_IS_POW2
result = (int)(result);
#else
result = (int)(result);
#endif
return result;
}
else
{
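// No texture data: keys come from a linear congruential generator
// (Numerical Recipes constants); without --rnd the value is overridden by
// i / 100, which yields long runs of equal consecutive keys.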
static unsigned int current = 0xf1232345;
const unsigned int mult = 1664525;
const unsigned int add = 1013904223ul;
//int peakWidth = sharp ? 100 : 10000;
// static int nextPeak = 200;
// static int currentBase = 0;
// static int currentWidth = TESTMAXIDX;
current = current * mult + add;
if (!rnd)
current = i / 100;
#if TEST_IS_POW2
i = (int)(current);
#else
i = (int)(current);
#endif
return i;
}
}
static void fillInput(int* input, bool load, bool rnd)
{
size_t i;
unsigned int* texData = NULL;
int dataSize = 0;
if (load && !rnd)
{
texData = MyTexture_load("texture.raw", &dataSize);
}
for (i = 0; i < TEST_SIZE * 4;)
{
*input++ = getInput(i++, texData, dataSize, rnd);
*input++ = getInput(i++, texData, dataSize, rnd);
*input++ = getInput(i++, texData, dataSize, rnd);
*input++ = getInput(i++, texData, dataSize, rnd);
}
if (texData) free(texData);
}
int main (int argc, char** argv)
{
int i;
int index_0 = START_INDEX;
int index_1 = index_0 + TEST_SIZE;
bool cpu = false;
bool print = false;
bool thrust = false;
bool stress = false;
// bool peaks = false;
// bool sharp = false;
bool rnd = false;
bool load = false;
printUsage();
for (i = 0; i < argc; i++)
{
if (argv[i] && strcmp(argv[i], "--cpu") == 0)
cpu = true;
if (argv[i] && strcmp(argv[i], "--print") == 0)
print = true;
if (argv[i] && strcmp(argv[i], "--thrust") == 0)
thrust = true;
if (argv[i] && strcmp(argv[i], "--stress") == 0)
stress = true;
// if (argv[i] && strcmp(argv[i], "--peaks") == 0)
// peaks = true;
if (argv[i] && strcmp(argv[i], "--load") == 0)
load = true;
// if (argv[i] && strcmp(argv[i], "--sharp") == 0)
// sharp = true;
if (argv[i] && strcmp(argv[i], "--rnd") == 0)
rnd = true;
}
{
// Allocate keys:
int* INPUT = NULL;
int* hostINPUT = (int*)malloc(4 * sizeof(int) * (TEST_SIZE + 3));
assert(hostINPUT);
fillInput(hostINPUT, load, rnd);
if (!cpu)
{
cudaMalloc(&INPUT, 4 * sizeof(int) * TEST_SIZE);
assert(INPUT);
cudaMemcpy(INPUT, hostINPUT, 4 * sizeof(int) * TEST_SIZE, cudaMemcpyHostToDevice);
}
void* tmpBuffer = NULL;
int zero = 0;
int tmpbufsize = getHistogramBufSize<histogram_atomic_inc>(zero , (int)TESTMAXIDX);
cudaMalloc(&tmpBuffer, tmpbufsize);
// Create events for timing:
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Now start timer - we run on stream 0 (default stream):
cudaEventRecord(start, 0);
for (i = 0; i < NRUNS; i++)
{
if (thrust)
{
#if ENABLE_THRUST
testHistogramParamThrust(INPUT, index_0, index_1, print);
#else
printf("\nTest was compiled without thrust support! Find 'ENABLE_THRUST' in source-code!\n\n Exiting...\n");
break;
#endif
}
else
{
testHistogramParam((uint4*)INPUT, (uint4*)hostINPUT, index_0, index_1, print, cpu, stress, tmpBuffer);
}
print = false;
// Run only once all stress-tests
if (stress) break;
}
{
float t_ms;
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
cudaEventElapsedTime(&t_ms, start, stop);
double t = t_ms * 0.001f;
double GKps = (((double)TEST_SIZE * (double)NRUNS * 16.0)) / (t*1.e9);
printf("Runtime in loops: %fs, Throughput (Gkeys/s): %3f GK/s \n", t, GKps);
}
if (INPUT) cudaFree(INPUT);
if (hostINPUT) free(hostINPUT);
if (tmpBuffer) cudaFree(tmpBuffer);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
return 0;
}
|
b8579e9cc310ba63c7a032e60b3e65b7b60d0994.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ __host__ int maximum( int a, int b, int c){
int k;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
return(c);
else
return(k);
}
__global__ void needle_cuda_shared_1( int* reference, int* matrix_cuda, int cols, int penalty, int i, int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx;
int b_index_y = i - 1 - bx;
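// b_index_x + b_index_y == i - 1: the tiles updated by this launch lie on one
// anti-diagonal of the blocked score matrix and only read values (north, west,
// north-west) produced by earlier diagonals.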
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) {
ref[ty][tx] = reference[index + cols * ty];
}
__syncthreads();
if (tx == 0) {
temp[tx][0] = matrix_cuda[index_nw];
}
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
__syncthreads();
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
for( int m = 0 ; m < BLOCK_SIZE ; m++){
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) {
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
} | b8579e9cc310ba63c7a032e60b3e65b7b60d0994.cu | #include "includes.h"
__device__ __host__ int maximum( int a, int b, int c){
int k;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
return(c);
else
return(k);
}
__global__ void needle_cuda_shared_1( int* reference, int* matrix_cuda, int cols, int penalty, int i, int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx;
int b_index_y = i - 1 - bx;
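// b_index_x + b_index_y == i - 1: the tiles updated by this launch lie on one
// anti-diagonal of the blocked score matrix and only read values (north, west,
// north-west) produced by earlier diagonals.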
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) {
ref[ty][tx] = reference[index + cols * ty];
}
__syncthreads();
if (tx == 0) {
temp[tx][0] = matrix_cuda[index_nw];
}
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
__syncthreads();
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
for( int m = 0 ; m < BLOCK_SIZE ; m++){
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) {
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
} |
ccce158f1da647fe80cfe11e15845170568856d4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
int* read_array(const char* filename, int len) {
int *x = (int*) malloc(len * sizeof(int));
FILE *fp = fopen(filename, "r");
for (int i = 0; i < len; i++) {
fscanf(fp, "%d", &x[i]);
}
fclose(fp);
return x;
}
int main(int argc, char *argv[]) {
if (argc != 1) {
printf("Invalid argument Usage: ./problem1");
return -1;
}
const int rowWidth=32;
const int colWidth=16;
int *hA = read_array("inputA.inp",rowWidth*colWidth );
int *hB = read_array("inputB.inp", rowWidth);
int *hC = (int*) malloc(colWidth * sizeof(int));
int *refC;
// TODO - allocate host memory for refC (you have to figure out how much)
// The skeleton currently segfaults because refC is accessed without allocation
// TODO do a reference host implementation (Ch) here. ie populate answer in refC
int *dA, *dB, *dC;
// TODO allocate device memory for dA,dB and dC
// TODO copy data from host to GPU
// TODO call your kernel
// TODO copyback results
float Error=0;
for(int i=0;i<colWidth;i++)
Error+=(hC[i]-refC[i])*(hC[i]-refC[i]);
printf("%f\n%d",sqrt(Error),hC[colWidth-1]);
free(refC);
free(hB);
free(hA);
return 0;
}
| ccce158f1da647fe80cfe11e15845170568856d4.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
int* read_array(const char* filename, int len) {
int *x = (int*) malloc(len * sizeof(int));
FILE *fp = fopen(filename, "r");
for (int i = 0; i < len; i++) {
fscanf(fp, "%d", &x[i]);
}
fclose(fp);
return x;
}
int main(int argc, char *argv[]) {
if (argc != 1) {
printf("Invalid argument Usage: ./problem1");
return -1;
}
const int rowWidth=32;
const int colWidth=16;
int *hA = read_array("inputA.inp",rowWidth*colWidth );
int *hB = read_array("inputB.inp", rowWidth);
int *hC = (int*) malloc(colWidth * sizeof(int));
int *refC;
// TODO - allocate host memory for refC (you have to figure out how much)
// The skeleton currently segfaults because refC is accessed without allocation
// TODO do a reference host implementation (Ch) here. ie populate answer in refC
int *dA, *dB, *dC;
// TODO allocate device memory for dA,dB and dC
// TODO copy data from host to GPU
// TODO call your kernel
// TODO copyback results
float Error=0;
for(int i=0;i<colWidth;i++)
Error+=(hC[i]-refC[i])*(hC[i]-refC[i]);
printf("%f\n%d",sqrt(Error),hC[colWidth-1]);
free(refC);
free(hB);
free(hA);
return 0;
}
|
74994cf43e05166958fd2b3f3337d1334d1d1b5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/helpers.h>
#include <ops/declarable/helpers/hamming.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename X, typename Z>
static _CUDA_G void _hammingKernel(const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, void *reductionBuffer, Nd4jLong length) {
auto x = reinterpret_cast<const X*>(vx);
auto y = reinterpret_cast<const X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
__shared__ Nd4jLong shared[CUDA_BLOCK_SIZE];
// we want to nullify temporary memory before accumulating intermediate results
shared[threadIdx.x] = 0;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += blockDim.x * gridDim.x) {
auto _x = static_cast<unsigned long long>(x[shape::getIndexOffset(e, xShapeInfo)]);
auto _y = static_cast<unsigned long long>(y[shape::getIndexOffset(e, yShapeInfo)]);
// we save intermediate result into shared memory
shared[threadIdx.x] += __popcll(_x ^ _y);
}
__syncthreads();
// now we accumulate values
auto numItems = sd::math::nd4j_min<Nd4jLong>(blockDim.x, length);
auto floorPow2 = numItems;
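// If the item count is not a power of two, fold the excess tail onto the
// lower part of the shared buffer first, then run a standard tree reduction.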
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1))
floorPow2 &= floorPow2 - 1;
if (threadIdx.x >= floorPow2)
shared[threadIdx.x - floorPow2] = shared[threadIdx.x - floorPow2] + shared[threadIdx.x];
__syncthreads();
}
__syncthreads();
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (threadIdx.x < activeThreads && threadIdx.x + activeThreads < numItems)
shared[threadIdx.x] = shared[threadIdx.x] + shared[threadIdx.x + activeThreads];
__syncthreads();
}
__syncthreads();
// FIXME: do we really want atomicAdd on global memory here
// and store them to output
if (threadIdx.x == 0 && shared[0] > 0)
sd::math::atomics::nd4j_atomicAdd<Z>(&z[0], static_cast<Z>(shared[threadIdx.x]));
}
template <typename X, typename Z>
static void _hamming(LaunchContext *context, NDArray &x, NDArray &y, NDArray &z) {
hipLaunchKernelGGL(( _hammingKernel<X, Z>), dim3(256), dim3(CUDA_BLOCK_SIZE), 1024, *context->getCudaStream(), x.specialBuffer(), x.specialShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.specialBuffer(), nullptr, x.lengthOf());
}
void hamming(LaunchContext *context, NDArray &x, NDArray &y, NDArray &output) {
NDArray::prepareSpecialUse({&output}, {&x, &y});
BUILD_DOUBLE_SELECTOR(x.dataType(), output.dataType(), _hamming, (context, x, y, output), INTEGER_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {&x, &y});
}
}
}
} | 74994cf43e05166958fd2b3f3337d1334d1d1b5b.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/helpers.h>
#include <ops/declarable/helpers/hamming.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename X, typename Z>
static _CUDA_G void _hammingKernel(const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, void *reductionBuffer, Nd4jLong length) {
auto x = reinterpret_cast<const X*>(vx);
auto y = reinterpret_cast<const X*>(vy);
auto z = reinterpret_cast<Z*>(vz);
__shared__ Nd4jLong shared[CUDA_BLOCK_SIZE];
// we want to nullify temporary memory before accumulating intermediate results
shared[threadIdx.x] = 0;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += blockDim.x * gridDim.x) {
auto _x = static_cast<unsigned long long>(x[shape::getIndexOffset(e, xShapeInfo)]);
auto _y = static_cast<unsigned long long>(y[shape::getIndexOffset(e, yShapeInfo)]);
// we save intermediate result into shared memory
shared[threadIdx.x] += __popcll(_x ^ _y);
}
__syncthreads();
// now we accumulate values
auto numItems = sd::math::nd4j_min<Nd4jLong>(blockDim.x, length);
auto floorPow2 = numItems;
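// If the item count is not a power of two, fold the excess tail onto the
// lower part of the shared buffer first, then run a standard tree reduction.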
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1))
floorPow2 &= floorPow2 - 1;
if (threadIdx.x >= floorPow2)
shared[threadIdx.x - floorPow2] = shared[threadIdx.x - floorPow2] + shared[threadIdx.x];
__syncthreads();
}
__syncthreads();
for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (threadIdx.x < activeThreads && threadIdx.x + activeThreads < numItems)
shared[threadIdx.x] = shared[threadIdx.x] + shared[threadIdx.x + activeThreads];
__syncthreads();
}
__syncthreads();
// FIXME: do we really want atomicAdd on global memory here
// and store them to output
if (threadIdx.x == 0 && shared[0] > 0)
sd::math::atomics::nd4j_atomicAdd<Z>(&z[0], static_cast<Z>(shared[threadIdx.x]));
}
template <typename X, typename Z>
static void _hamming(LaunchContext *context, NDArray &x, NDArray &y, NDArray &z) {
_hammingKernel<X, Z><<<256, CUDA_BLOCK_SIZE, 1024, *context->getCudaStream()>>>(x.specialBuffer(), x.specialShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.specialBuffer(), nullptr, x.lengthOf());
}
void hamming(LaunchContext *context, NDArray &x, NDArray &y, NDArray &output) {
NDArray::prepareSpecialUse({&output}, {&x, &y});
BUILD_DOUBLE_SELECTOR(x.dataType(), output.dataType(), _hamming, (context, x, y, output), INTEGER_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {&x, &y});
}
}
}
} |
54f353d1a0eb108ff1a81d00534459110b1b856d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include <stdio.h>
# include <stdlib.h>
# include <hip/hip_runtime.h>
# include <sys/time.h>
# include <unistd.h>
# define BLOCK_SIZE (32)
//# define n 128
//# define n 256
//# define n 512
//# define n 1024
//# define n 2048
//# define n 4096
# define n 8192
# define threshold 1e-8
double rtclock(void)
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0)
printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
void compare(int N, double *wref, double *w)
{
double maxdiff,this_diff;
int numdiffs;
int i,j;
numdiffs = 0;
maxdiff = 0;
for(i=0;i<N;i++)
for(j=0;j<N;j++)
{
this_diff = wref[i*N+j]-w[i*N+j];
if(this_diff < 0)
this_diff = -1.0*this_diff;
if(this_diff>threshold)
{
numdiffs++;
if(this_diff > maxdiff)
maxdiff=this_diff;
}
}
if(numdiffs > 0)
printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs, threshold, maxdiff);
else
printf("No differences found between reference and test versions\n");
}
int *mat_mul_ord(int *A, int *B, int *C)
{
for(int i = 0; i < n; i++)
for(int j = 0; j < n; j++)
{
int sum = 0;
for(int k = 0; k < n; k++)
sum += A[i*n+k] * B[k*n+j];
C[i*n+j] = sum;
}
return C;
}
__global__ void mat_mul_dev(int *A, int *B, int *C)
{
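// One thread per output element C[x][y]. Note that B is indexed as B[y*n+k],
// so this kernel effectively computes A * B^T, whereas the host reference
// mat_mul_ord uses B[k*n+j]; with the constant test inputs both give the same values.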
int x=threadIdx.y+blockIdx.y*blockDim.y;
int y=threadIdx.x+blockIdx.x*blockDim.x;
int sum=0;
if((x<n)&&(y<n))
for (int k=0;k<n;k++)
sum += A[x*n+k]*B[y*n+k];
C[x*n+y]=sum;
}
__global__ void matrixMul(int *A, int *B, int *C)
{
// Declaration of the shared memory arrays As and Bs used to store the sub-matrices of A and B respectively
__shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];
int w = BLOCK_SIZE;
// Block Index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread Index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Row 'row' and Column 'col' of matrix A or B
int col = bx*w + tx;
int row = by*w + ty;
// Cv is used to store the element of the block sub-matrix that is computed by the thread
int Cv = 0;
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for(int k = 0; k < n/w; k++)
{
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
As[ty][tx] = A[row*n + (k*w + tx)];
Bs[ty][tx] = B[(k*w + ty)*n + col];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together; each thread computes one element of the block sub-matrix
for(int l = 0; l < w; l++)
Cv += As[ty][l] * Bs[l][tx];
}
// Write the block sub-matrix to device memory; each thread writes one element
C[row*n + col] = Cv;
}
int main()
{
int *A, *B, *C, *Cref1, *Cref2;
int *A_d, *B_d, *C_d;
int i, j;
double clkbegin, clkend, t;
A = (int *) malloc(n*n*sizeof(int*));
B = (int *) malloc(n*n*sizeof(int*));
C = (int *) malloc(n*n*sizeof(int*));
Cref1 = (int *) malloc(n*n*sizeof(int*));
Cref2 = (int *) malloc(n*n*sizeof(int*));
int size = n*n*sizeof(int);
// Initialise the input data on the CPU
for(i = 0; i < n; i++)
for(j = 0; j < n; j++)
{
A[i*n+j] = 2;//i+j;
B[i*n+j] = 1;//2+i+j;
}
clkbegin = rtclock();
C = mat_mul_ord(A, B, C);
clkend = rtclock();
t = clkend-clkbegin;
printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; c[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, C[((n/2)*n)+n/2-1]);
// Create corresponding int arrays on the GPU
hipMalloc((void**)&A_d, size);
hipMalloc((void**)&B_d, size);
hipMalloc((void**)&C_d, size);
// Copy input data to array on GPU
hipMemcpy(A_d, A, size, hipMemcpyHostToDevice);
hipMemcpy(B_d, B, size, hipMemcpyHostToDevice);
// Set the grid and block sizes to launch kernel
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(n/BLOCK_SIZE, n/BLOCK_SIZE);
clkbegin = rtclock();
hipLaunchKernelGGL(( mat_mul_dev), dim3(grid), dim3(block), 0, 0, A_d, B_d, C_d);
clkend = rtclock();
t = clkend-clkbegin;
hipMemcpy(Cref1, C_d, size, hipMemcpyDeviceToHost);
printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; c[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, Cref1[((n/2)*n)+n/2-1]);
clkbegin = rtclock();
hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(block), 0, 0, A_d, B_d, C_d);
clkend = rtclock();
t = clkend-clkbegin;
// Copy output array from GPU back to CPU
hipMemcpy(Cref2, C_d, size, hipMemcpyDeviceToHost);
// Free up the arrays on the GPU
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
/*
for(i = 0; i < n; i++)
{
for(j = 0; j < n; j++)
printf("%d ", C[i*n+j]);
printf("\n");
}
*/
printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; C[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, Cref2[((n/2)*n)+n/2-1]);
// printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; c[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, Cref[((n/2)*n)+n/2-1]);
compare(n, (double *) C,(double *) Cref1);
compare(n, (double *) C,(double *) Cref2);
return 0;
}
| 54f353d1a0eb108ff1a81d00534459110b1b856d.cu |
# include <stdio.h>
# include <stdlib.h>
# include <cuda.h>
# include <sys/time.h>
# include <unistd.h>
# define BLOCK_SIZE (32)
//# define n 128
//# define n 256
//# define n 512
//# define n 1024
//# define n 2048
//# define n 4096
# define n 8192
# define threshold 1e-8
double rtclock(void)
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0)
printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
void compare(int N, double *wref, double *w)
{
double maxdiff,this_diff;
int numdiffs;
int i,j;
numdiffs = 0;
maxdiff = 0;
for(i=0;i<N;i++)
for(j=0;j<N;j++)
{
this_diff = wref[i*N+j]-w[i*N+j];
if(this_diff < 0)
this_diff = -1.0*this_diff;
if(this_diff>threshold)
{
numdiffs++;
if(this_diff > maxdiff)
maxdiff=this_diff;
}
}
if(numdiffs > 0)
printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs, threshold, maxdiff);
else
printf("No differences found between reference and test versions\n");
}
int *mat_mul_ord(int *A, int *B, int *C)
{
for(int i = 0; i < n; i++)
for(int j = 0; j < n; j++)
{
int sum = 0;
for(int k = 0; k < n; k++)
sum += A[i*n+k] * B[k*n+j];
C[i*n+j] = sum;
}
return C;
}
__global__ void mat_mul_dev(int *A, int *B, int *C)
{
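// One thread per output element C[x][y]. Note that B is indexed as B[y*n+k],
// so this kernel effectively computes A * B^T, whereas the host reference
// mat_mul_ord uses B[k*n+j]; with the constant test inputs both give the same values.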
int x=threadIdx.y+blockIdx.y*blockDim.y;
int y=threadIdx.x+blockIdx.x*blockDim.x;
int sum=0;
if((x<n)&&(y<n))
for (int k=0;k<n;k++)
sum += A[x*n+k]*B[y*n+k];
C[x*n+y]=sum;
}
__global__ void matrixMul(int *A, int *B, int *C)
{
// Declaration of the shared memory arrays As and Bs used to store the sub-matrices of A and B respectively
__shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];
int w = BLOCK_SIZE;
// Block Index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread Index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Row 'row' and Column 'col' of matrix A or B
int col = bx*w + tx;
int row = by*w + ty;
// Cv is used to store the element of the block sub-matrix that is computed by the thread
int Cv = 0;
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for(int k = 0; k < n/w; k++)
{
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
As[ty][tx] = A[row*n + (k*w + tx)];
Bs[ty][tx] = B[(k*w + ty)*n + col];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together; each thread computes one element of the block sub-matrix
for(int l = 0; l < w; l++)
Cv += As[ty][l] * Bs[l][tx];
}
// Write the block sub-matrix to device memory; each thread writes one element
C[row*n + col] = Cv;
}
int main()
{
int *A, *B, *C, *Cref1, *Cref2;
int *A_d, *B_d, *C_d;
int i, j;
double clkbegin, clkend, t;
A = (int *) malloc(n*n*sizeof(int*));
B = (int *) malloc(n*n*sizeof(int*));
C = (int *) malloc(n*n*sizeof(int*));
Cref1 = (int *) malloc(n*n*sizeof(int*));
Cref2 = (int *) malloc(n*n*sizeof(int*));
int size = n*n*sizeof(int);
// Initialise the input data on the CPU
for(i = 0; i < n; i++)
for(j = 0; j < n; j++)
{
A[i*n+j] = 2;//i+j;
B[i*n+j] = 1;//2+i+j;
}
clkbegin = rtclock();
C = mat_mul_ord(A, B, C);
clkend = rtclock();
t = clkend-clkbegin;
printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; c[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, C[((n/2)*n)+n/2-1]);
// Create corresponding int arrays on the GPU
cudaMalloc((void**)&A_d, size);
cudaMalloc((void**)&B_d, size);
cudaMalloc((void**)&C_d, size);
// Copy input data to array on GPU
cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);
// Set the grid and block sizes to launch kernel
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(n/BLOCK_SIZE, n/BLOCK_SIZE);
clkbegin = rtclock();
mat_mul_dev<<<grid, block>>>(A_d, B_d, C_d);
clkend = rtclock();
t = clkend-clkbegin;
cudaMemcpy(Cref1, C_d, size, cudaMemcpyDeviceToHost);
printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; c[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, Cref1[((n/2)*n)+n/2-1]);
clkbegin = rtclock();
matrixMul<<<grid, block>>>(A_d, B_d, C_d);
clkend = rtclock();
t = clkend-clkbegin;
// Copy output array from GPU back to CPU
cudaMemcpy(Cref2, C_d, size, cudaMemcpyDeviceToHost);
// Free up the arrays on the GPU
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
/*
for(i = 0; i < n; i++)
{
for(j = 0; j < n; j++)
printf("%d ", C[i*n+j]);
printf("\n");
}
*/
printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; C[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, Cref2[((n/2)*n)+n/2-1]);
// printf("GPU: Approx GFLOPS: %.1f ; Time = %f sec ; c[n/2][n/2-1] = %d\n", 2.0*n*n*n/t/1e9, t, Cref[((n/2)*n)+n/2-1]);
compare(n, (double *) C,(double *) Cref1);
compare(n, (double *) C,(double *) Cref2);
return 0;
}
|
10e6b646471f372770ad8d5c58a99f871fe6bcad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
__device__
unsigned char normalizeColor(int sum)
{
if (sum > 255) return 255;
return (unsigned char)sum;
}
__global__
void generateImage(Pixel* pixels, int pixelsCount,
Color* result, int cols, int rows, int size)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
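// Grid-stride loop: each thread covers several output pixels whenever the
// image is larger than the launched grid.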
for (int i = index; i < size; i += stride)
{
int currentRow = i / cols;
int currentCol = i % cols;
int red = 0, green = 0, blue = 0;
for (int j = 0; j < pixelsCount; j++)
{
double rowDist = currentRow - pixels[j].point.y;
double colDist = currentCol - pixels[j].point.x;
double distance = rowDist * rowDist + colDist * colDist + 1;
// abs(rowDist) + abs(colDist) + 1;
red += pixels[j].color.red * pixels[j].color.red / distance;
green += pixels[j].color.green * pixels[j].color.green / distance;
blue += pixels[j].color.blue * pixels[j].color.blue / distance;
}
result[i].red = normalizeColor(red);
result[i].green = normalizeColor(green);
result[i].blue = normalizeColor(blue);
}
} | 10e6b646471f372770ad8d5c58a99f871fe6bcad.cu | #include "kernel.cuh"
__device__
unsigned char normalizeColor(int sum)
{
if (sum > 255) return 255;
return (unsigned char)sum;
}
__global__
void generateImage(Pixel* pixels, int pixelsCount,
Color* result, int cols, int rows, int size)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
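// Grid-stride loop: each thread covers several output pixels whenever the
// image is larger than the launched grid.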
for (int i = index; i < size; i += stride)
{
int currentRow = i / cols;
int currentCol = i % cols;
int red = 0, green = 0, blue = 0;
for (int j = 0; j < pixelsCount; j++)
{
double rowDist = currentRow - pixels[j].point.y;
double colDist = currentCol - pixels[j].point.x;
double distance = rowDist * rowDist + colDist * colDist + 1;
// abs(rowDist) + abs(colDist) + 1;
red += pixels[j].color.red * pixels[j].color.red / distance;
green += pixels[j].color.green * pixels[j].color.green / distance;
blue += pixels[j].color.blue * pixels[j].color.blue / distance;
}
result[i].red = normalizeColor(red);
result[i].green = normalizeColor(green);
result[i].blue = normalizeColor(blue);
}
} |
a13f88d04e0134590e9684f2c96a937bea367250.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#include <assert.h>
#include <cutil_inline.h>
// #include <vector>
////////////////////////////////////////////////////////////////////////////////
// Convolution kernel storage
////////////////////////////////////////////////////////////////////////////////
__constant__ float c_Kernel_h[100];
__constant__ float c_Kernel_v[100];
extern "C" void setConvolutionKernel_horizontal(float *h_Kernel, int kernel_length){
hipMemcpyToSymbol(c_Kernel_h, h_Kernel, kernel_length * sizeof(float));
}
extern "C" void setConvolutionKernel_vertical(float *h_Kernel, int kernel_length){
hipMemcpyToSymbol(c_Kernel_v, h_Kernel, kernel_length * sizeof(float));
}
////////////////////////////////////////////////////////////////////////////////
// Constants
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 16
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 3
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 16
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 3
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch,
int kernel_radius
){
__shared__ float s_Data[ROWS_BLOCKDIM_Y]
[(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
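// Each thread row stages ROWS_RESULT_STEPS output tiles plus ROWS_HALO_STEPS
// halo tiles on either side in shared memory, so the horizontal filter never
// has to reach back into global memory for its neighbourhood.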
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) *
ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
s_Data[threadIdx.y]
[threadIdx.x + i * ROWS_BLOCKDIM_X]
= d_Src[i * ROWS_BLOCKDIM_X];
//Left halo
for(int i = 0; i < ROWS_HALO_STEPS; i++){
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] =
(baseX >= -i * ROWS_BLOCKDIM_X ) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Right halo
for(int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS;
i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++){
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] =
(imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -kernel_radius; j <= kernel_radius; j++)
sum += c_Kernel_h[kernel_radius - j] *
s_Data [threadIdx.y]
[threadIdx.x + i * ROWS_BLOCKDIM_X + j];
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
extern "C" void convolutionRowsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int kernel_radius
){
assert( ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= kernel_radius );
//There is a rational division of the image into blocks
assert( imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0 );
assert( imageH % ROWS_BLOCKDIM_Y == 0 );
dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y);
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, 0,
d_Dst,
d_Src,
imageW,
imageH,
imageW,
kernel_radius
);
cutilCheckMsg("convolutionRowsKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch,
int kernel_radius
){
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
//Upper halo
for(int i = 0; i < COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
(baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
//Lower halo
for(int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
(imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
//Compute and store results
__syncthreads();
#pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -kernel_radius; j <= kernel_radius; j++)
sum += c_Kernel_v[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
extern "C" void convolutionColumnsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int kernel_radius
){
assert( COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= kernel_radius );
assert( imageW % COLUMNS_BLOCKDIM_X == 0 );
assert( imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0 );
dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, 0,
d_Dst,
d_Src,
imageW,
imageH,
imageW,
kernel_radius
);
cutilCheckMsg("convolutionColumnsKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Simple interface to compute a derivative
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Computes the higher eigenvalue of the hessian
////////////////////////////////////////////////////////////////////////////////
__global__ void hessianKernel(
float *d_output,
float *d_gxx,
float *d_gxy,
float *d_gyy,
float scale,
int imageW,
int imageH,
int invert
){
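// For the symmetric 2x2 Hessian [[a, b], [b, c]] the eigenvalues are
// (a + c)/2 +- sqrt((a - c)^2 + 4*b^2)/2; this keeps the larger one,
// scaled by scale^2, with 'invert' flipping the sign of the input derivatives.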
int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW +
blockDim.x*blockIdx.x+threadIdx.x;
float a, b, c;
a = invert*d_gxx[i];
b = invert*d_gxy[i];
c = invert*d_gyy[i];
d_output[i] = ((a+c)/2 + sqrt( (a-c)*(a-c) + 4*b*b)/2)*scale*scale;
// d_output[i] = (a-c)*(a-c) + 4*b*b;
// d_output[i] = b;
}
extern "C" void hessianGPU
(
float *d_output,
float *d_gxx,
float *d_gxy,
float *d_gyy,
float scale,
int imageW,
int imageH,
int invert
)
{
dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y));
dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y);
hipLaunchKernelGGL(( hessianKernel), dim3(gird), dim3(block), 0, 0, d_output, d_gxx, d_gxy, d_gyy, scale, imageW, imageH, invert );
cutilCheckMsg("hessianKernel() execution failed\n");
}
//////////////// MAX /////////
__global__ void maxKernel(
float *d_output,
float *d_isMaxThanOutput,
int imageW,
int imageH
){
int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW +
blockDim.x*blockIdx.x+threadIdx.x;
if(d_isMaxThanOutput[i] >= d_output[i])
d_output[i] = d_isMaxThanOutput[i];
}
extern "C" void maxGPU
(
float *d_output,
float *d_isMaxThanOutput,
int imageW,
int imageH
)
{
dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y));
dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y);
hipLaunchKernelGGL(( maxKernel), dim3(gird), dim3(block), 0, 0, d_output, d_isMaxThanOutput, imageW, imageH );
cutilCheckMsg("maxKernel() execution failed\n");
}
__global__ void maxKernel_scale(
float *d_output,
float *d_scale,
float *d_isMaxThanOutput,
float scale,
int imageW,
int imageH
){
int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW +
blockDim.x*blockIdx.x+threadIdx.x;
if(d_isMaxThanOutput[i] > d_output[i]){
d_output[i] = d_isMaxThanOutput[i];
if(d_output[i] > 30)
d_scale[i] = scale;
}
}
extern "C" void maxGPU_scale
(
float *d_output,
float *d_scale,
float *d_isMaxThanOutput,
float scale,
int imageW,
int imageH
)
{
dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y));
dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y);
hipLaunchKernelGGL(( maxKernel_scale), dim3(gird), dim3(block), 0, 0, d_output, d_scale, d_isMaxThanOutput, scale,
imageW, imageH );
cutilCheckMsg("maxKernel() execution failed\n");
}
////////////////////////// PUT VALUE
__global__ void putKernel(
float *d_output,
float value,
int imageW,
int imageH
){
int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW +
blockDim.x*blockIdx.x+threadIdx.x;
d_output[i] = value;
}
extern "C" void putGPU
(
float *d_output,
float value,
int imageW,
int imageH
)
{
dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y));
dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y);
hipLaunchKernelGGL(( putKernel), dim3(gird), dim3(block), 0, 0, d_output, value, imageW, imageH );
cutilCheckMsg("maxKernel() execution failed\n");
}
| a13f88d04e0134590e9684f2c96a937bea367250.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#include <assert.h>
#include <cutil_inline.h>
// #include <vector>
////////////////////////////////////////////////////////////////////////////////
// Convolution kernel storage
////////////////////////////////////////////////////////////////////////////////
__constant__ float c_Kernel_h[100];
__constant__ float c_Kernel_v[100];
extern "C" void setConvolutionKernel_horizontal(float *h_Kernel, int kernel_length){
cudaMemcpyToSymbol(c_Kernel_h, h_Kernel, kernel_length * sizeof(float));
}
extern "C" void setConvolutionKernel_vertical(float *h_Kernel, int kernel_length){
cudaMemcpyToSymbol(c_Kernel_v, h_Kernel, kernel_length * sizeof(float));
}
////////////////////////////////////////////////////////////////////////////////
// Constants
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 16
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 3
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 16
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 3
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch,
int kernel_radius
){
__shared__ float s_Data[ROWS_BLOCKDIM_Y]
[(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
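// Each thread row stages ROWS_RESULT_STEPS output tiles plus ROWS_HALO_STEPS
// halo tiles on either side in shared memory, so the horizontal filter never
// has to reach back into global memory for its neighbourhood.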
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) *
ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
s_Data[threadIdx.y]
[threadIdx.x + i * ROWS_BLOCKDIM_X]
= d_Src[i * ROWS_BLOCKDIM_X];
//Left halo
for(int i = 0; i < ROWS_HALO_STEPS; i++){
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] =
(baseX >= -i * ROWS_BLOCKDIM_X ) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Right halo
for(int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS;
i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++){
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] =
(imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -kernel_radius; j <= kernel_radius; j++)
sum += c_Kernel_h[kernel_radius - j] *
s_Data [threadIdx.y]
[threadIdx.x + i * ROWS_BLOCKDIM_X + j];
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
extern "C" void convolutionRowsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int kernel_radius
){
assert( ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= kernel_radius );
//There is a rational division of the image into blocks
assert( imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0 );
assert( imageH % ROWS_BLOCKDIM_Y == 0 );
dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y);
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
convolutionRowsKernel<<<blocks, threads>>>(
d_Dst,
d_Src,
imageW,
imageH,
imageW,
kernel_radius
);
cutilCheckMsg("convolutionRowsKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch,
int kernel_radius
){
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
//Upper halo
for(int i = 0; i < COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
(baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
//Lower halo
for(int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
(imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
//Compute and store results
__syncthreads();
#pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -kernel_radius; j <= kernel_radius; j++)
sum += c_Kernel_v[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
extern "C" void convolutionColumnsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int kernel_radius
){
assert( COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= kernel_radius );
assert( imageW % COLUMNS_BLOCKDIM_X == 0 );
assert( imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0 );
dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
convolutionColumnsKernel<<<blocks, threads>>>(
d_Dst,
d_Src,
imageW,
imageH,
imageW,
kernel_radius
);
cutilCheckMsg("convolutionColumnsKernel() execution failed\n");
}
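////////////////////////////////////////////////////////////////////////////////
// Usage sketch (not part of the original source): a separable filter applied
// as a row pass followed by a column pass. d_src, d_tmp and d_dst are assumed
// to be device buffers of imageW*imageH floats and h_kernel to hold
// 2*radius+1 host-side taps; all names here are illustrative only.
////////////////////////////////////////////////////////////////////////////////
extern "C" void separableConvolutionSketch(
    float *d_dst,
    float *d_tmp,
    float *d_src,
    float *h_kernel,
    int radius,
    int imageW,
    int imageH
){
    setConvolutionKernel_horizontal(h_kernel, 2 * radius + 1);
    setConvolutionKernel_vertical(h_kernel, 2 * radius + 1);
    convolutionRowsGPU(d_tmp, d_src, imageW, imageH, radius); // filter along x
    convolutionColumnsGPU(d_dst, d_tmp, imageW, imageH, radius); // filter along y
}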
////////////////////////////////////////////////////////////////////////////////
// Simple interface to compute a derivative
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Computes the higher eigenvalue of the hessian
////////////////////////////////////////////////////////////////////////////////
__global__ void hessianKernel(
float *d_output,
float *d_gxx,
float *d_gxy,
float *d_gyy,
float scale,
int imageW,
int imageH,
int invert
){
int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW +
blockDim.x*blockIdx.x+threadIdx.x;
float a, b, c;
a = invert*d_gxx[i];
b = invert*d_gxy[i];
c = invert*d_gyy[i];
d_output[i] = ((a+c)/2 + sqrt( (a-c)*(a-c) + 4*b*b)/2)*scale*scale;
// d_output[i] = (a-c)*(a-c) + 4*b*b;
// d_output[i] = b;
}
extern "C" void hessianGPU
(
float *d_output,
float *d_gxx,
float *d_gxy,
float *d_gyy,
float scale,
int imageW,
int imageH,
int invert
)
{
dim3 grid (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y));
dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y);
hessianKernel<<<grid, block>>>( d_output, d_gxx, d_gxy, d_gyy, scale, imageW, imageH, invert );
cutilCheckMsg("hessianKernel() execution failed\n");
}
//////////////// MAX /////////
__global__ void maxKernel(
float *d_output,
float *d_isMaxThanOutput,
int imageW,
int imageH
){
int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW +
blockDim.x*blockIdx.x+threadIdx.x;
if(d_isMaxThanOutput[i] >= d_output[i])
d_output[i] = d_isMaxThanOutput[i];
}
extern "C" void maxGPU
(
float *d_output,
float *d_isMaxThanOutput,
int imageW,
int imageH
)
{
dim3 grid (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y));
dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y);
maxKernel<<<grid, block>>>( d_output, d_isMaxThanOutput, imageW, imageH );
cutilCheckMsg("maxKernel() execution failed\n");
}
__global__ void maxKernel_scale(
float *d_output,
float *d_scale,
float *d_isMaxThanOutput,
float scale,
int imageW,
int imageH
){
int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW +
blockDim.x*blockIdx.x+threadIdx.x;
if(d_isMaxThanOutput[i] > d_output[i]){
d_output[i] = d_isMaxThanOutput[i];
if(d_output[i] > 30)
d_scale[i] = scale;
}
}
extern "C" void maxGPU_scale
(
float *d_output,
float *d_scale,
float *d_isMaxThanOutput,
float scale,
int imageW,
int imageH
)
{
dim3 grid (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y));
dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y);
maxKernel_scale<<<grid, block>>>( d_output, d_scale, d_isMaxThanOutput, scale,
imageW, imageH );
cutilCheckMsg("maxKernel_scale() execution failed\n");
}
////////////////////////// PUT VALUE
__global__ void putKernel(
float *d_output,
float value,
int imageW,
int imageH
){
int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW +
blockDim.x*blockIdx.x+threadIdx.x;
d_output[i] = value;
}
extern "C" void putGPU
(
float *d_output,
float value,
int imageW,
int imageH
)
{
dim3 grid (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y));
dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y);
putKernel<<<grid, block>>>( d_output, value, imageW, imageH );
cutilCheckMsg("putKernel() execution failed\n");
}
|
2e12334f0aed9d8e04a09e64d488b1a4081f21b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "IndiceTools_GPU.h"
#include "RipplingMath.h"
using namespace gpu;
// Note: the choice of the file name is important!
// VagueDevice.cu and not Vague.cu
// In the latter case there is a linkage problem, because the .cu would have the same name as a .cpp (host) file.
// "Device" (or anything else) is therefore appended so that the names are different!
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t)
{
RipplingMath ripplingMath = RipplingMath(w, h);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
int i; // in [0,h[
int j; // in [0,w[
int s = TID; // in [0,...
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j); // s[0,W*H[ --> i[0,H[ j[0,W[
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t); // update ptrTabPixels[s]
s += NB_THREAD;
}
}
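/*--------------------------------------*\
|* Usage sketch *|
\*-------------------------------------*/
// Not part of the original source: a minimal host-side launch of the kernel
// above, assuming ptrDevPixels already points to w*h uchar4 pixels in device
// memory. The grid/block dimensions below are illustrative only.
void launchRipplingSketch(uchar4* ptrDevPixels, uint w, uint h, float t)
{
    dim3 dg(16, 16, 1); // hypothetical grid
    dim3 db(16, 16, 1); // hypothetical block
    hipLaunchKernelGGL(rippling, dg, db, 0, 0, ptrDevPixels, w, h, t);
}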
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 2e12334f0aed9d8e04a09e64d488b1a4081f21b3.cu | #include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "IndiceTools_GPU.h"
#include "RipplingMath.h"
using namespace gpu;
// Note: the choice of the file name is important!
// VagueDevice.cu and not Vague.cu
// In the latter case there is a linkage problem, because the .cu would have the same name as a .cpp (host) file.
// "Device" (or anything else) is therefore appended so that the names are different!
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t)
{
RipplingMath ripplingMath = RipplingMath(w, h);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
int i; // in [0,h[
int j; // in [0,w[
int s = TID; // in [0,...
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j); // s[0,W*H[ --> i[0,H[ j[0,W[
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t); // update ptrTabPixels[s]
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
31edf7a5a7ed53001f3960f57ad6df570ca93d05.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/resource.h>
typedef struct{
char** name;
char* chrom_c;
//int* chrom;
long* pos;
//long* c_pos;
char** rest;
}SNP;
typedef struct{
char* snp_name;
int* a_id; //length is the number of animals
char* ab1;
char* ab2;
int* ab;
}Sample;
int NSNPS;
int NSAMPLES;
__device__ void sort_by_bit(SNP* snps, Sample* samples, int bit);
__device__ long scan(long* x);
void read_files(char* map_path, char* snp_path, char** data_string, char** snps_data){
FILE *fd;
int err;
int num_lines = -1;
char** header_array;
int i;
/***********************Allocate string for header info**********/
printf("Allocating string for header array...\n");
header_array = (char**) malloc( 10 * sizeof(char*));
for(i = 0; i < 10; i++){
header_array[i] = (char*)malloc(100);
}
/*****************************************************************/
fd = fopen(snp_path, "r");
/*******Getting number of SNP and Sample from header****/
printf("Getting number of SNPs and Samples from header...\n");
do {
err = fscanf(fd, "%[^\n]\n", header_array[++num_lines]);
} while(err != EOF && num_lines < 10);
err = sscanf(header_array[5], "Total SNP %d", &NSNPS);
err = sscanf(header_array[7], "Total Sample %d", &NSAMPLES);
/***********************************************************/
/*************Getting Final Report Data***********************************/
printf("Getting final report data...\n");
//char** data_string;
data_string = (char**) malloc(NSNPS * NSAMPLES * sizeof(char*));
for(i = 0; i < NSNPS*NSAMPLES; i++){
data_string[i] = (char*)malloc(100);
}
num_lines = -1;
do {
err = fscanf(fd, "%[^\n]\n", data_string[++num_lines]);
} while(err != EOF && num_lines < NSNPS*NSAMPLES);
fclose(fd);
/**************************************************************************/
/************************Getting MapFile Data******************************/
printf("Getting mapfile data...\n");
//char** snps_data;
char* junk = (char*) malloc(50 * sizeof(char));
snps_data = (char**) malloc(NSNPS * sizeof(char*));
for(i = 0; i < NSNPS; i++){
snps_data[i] = (char*)malloc(100);
}
fd = fopen(map_path, "r");
int num_lines2 = -1;
err = fscanf(fd, "%[^\n]\n", junk);
do {
err = fscanf(fd, "%[^\n]\n", snps_data[++num_lines2]);
} while(err != EOF && num_lines2 < NSNPS);
free(junk);
fclose(fd);
/**************************************************************************/
}
/*************functions for the radix sort**********************************/
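/*
 * Descriptive note (not in the original source): radixsort() is an LSD radix
 * sort over the 64 bits of SNP.pos. Each sort_by_bit() pass is a stable
 * split: every thread reads the current bit of its element, scan() builds an
 * inclusive prefix sum of those bits across the block, and elements are then
 * rewritten "zeros first, ones after", carrying name/chromosome/sample data
 * along with the position.
 *
 * Example of one pass on bit 0 of pos = {5, 2, 7, 4}:
 * bits = {1, 0, 1, 0}; the zeros keep their relative order and the ones are
 * appended after them, giving {2, 4, 5, 7}; later passes refine higher bits.
 */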
__device__ void radixsort(SNP* snps, Sample* samples){
for(int i = 0; i < 64; i++){
sort_by_bit(snps, samples, i);
__syncthreads();
}
}
__device__ void sort_by_bit(SNP* snps, Sample* samples, int bit){
int i = threadIdx.x;
int size = blockDim.x;
int index;
/***temporary variables for the snps*****/
long t_pos = snps->pos[i];
char* t_name = snps->name[i];
char t_chrom_c = snps->chrom_c[i];
//char* t_rest = snps->rest[i];
Sample t_sample = samples[i];
int p_i = (t_pos >> bit) & 1;
snps->pos[i] = p_i;
__syncthreads();
int ones_before = scan(snps->pos);
int ones_total = snps->pos[size -1];
int zeros_total = size - ones_total;
__syncthreads();
if(p_i)
index = ones_before - 1 + zeros_total;
else
index = i - ones_before;
snps->pos[index] = t_pos;
snps->name[index] = t_name;
snps->chrom_c[index] = t_chrom_c;
//snps->rest[index] = t_rest;
samples[index] = t_sample;
}
/**************************************************************************/
__device__ long scan(long* x){
int i = threadIdx.x;
int n = blockDim.x;
int offset;
for ( offset = 1; offset < n; offset *= 2){
long temp;
if (i >= offset)
temp = x[i-offset];
__syncthreads();
if(i >= offset)
x[i] = temp + x[i];
__syncthreads();
}
return x[i];
}
void parse(SNP* snps, Sample* animals, char** data_string, char** snp_data){
int i, j, err;
snps->name = (char**) malloc(NSNPS * sizeof(char*));
snps->chrom_c = (char*) malloc(NSNPS * sizeof(char));
snps->pos = (long*) malloc(NSNPS * sizeof(long));
for(i = 0; i < NSNPS; i++)
snps->name[i] = (char*) malloc(50 * sizeof(char));
animals = (Sample*) malloc(NSNPS * sizeof(Sample));
for(i = 0; i < NSNPS; i++){
animals[i].snp_name = (char*) malloc(50 * sizeof(char));
animals[i].a_id = (int*) malloc(NSAMPLES * sizeof(int));
animals[i].ab1 = (char*) malloc(NSAMPLES * sizeof(char));
animals[i].ab2 = (char*) malloc(NSAMPLES * sizeof(char));
animals[i].ab = (int*) malloc(NSAMPLES * sizeof(int));
}
for (i = 0; i < NSNPS; i++){
err = sscanf(snp_data[i], "%*d %s %c %ld %*s",
snps->name[i], &snps->chrom_c[i], &snps->pos[i]);
}
for(i = 0; i < NSNPS; i++){
for(j = 0; j < NSAMPLES; j++)
err = sscanf(data_string[i], "%s\t%d\t%*c\t%*c\t%*c\t%*c\t%c\t%c\t%*s",
animals[i].snp_name, &animals[i].a_id[j], &animals[i].ab1[j], &animals[i].ab2[j]);
}
}
__global__ void sort(SNP* snps, Sample* samples, int nsamples){
int id = threadIdx.x;
radixsort(snps, samples);
for(int i = 0; i < nsamples; i++){
if (samples[id].ab1[i] == 'A' && samples[id].ab2[i] == 'A'){
samples[id].ab[i] = 1;
}else if(samples[id].ab1[i] == 'B' && samples[id].ab2[i] == 'B'){
samples[id].ab[i] = 2;
}else{
samples[id].ab[i] = 3;
}
}
}
int main(int argc, char** argv){
printf("Begin.\n");
SNP h_snps;
/*
typedef struct{
char** name;
char* chrom_c;
//int* chrom;
long* pos;
//long* c_pos;
//char** rest;
}SNP;
*/
Sample* h_samples;
/*
typedef struct{
char* snp_name;
int* a_id; //length is the number of animals
char* ab1;
char* ab2;
int* ab;
}Sample;
*/
//char map_path[], snp_path[];
char** data_string, **snps_data;
char** d_name;
char* d_chrom_c;
long* d_pos;
printf("Reading files...\n");
//map_path = argv[1];
char map_path[] = "./sample-files/test-files/SNP_Map_Truncated.txt";
//snp_path = argv[2];
char snp_path[] = "./sample-files/test-files/FinalReport_Truncated.txt";
read_files(map_path, snp_path, data_string, snps_data);
printf("Files read.\nParsing...\n");
parse(&h_snps, h_samples, data_string, snps_data);
printf("Data parsed.\n");
free(data_string);
free(snps_data);
printf("Allocating CUDA memory...\n");
hipMalloc((void**)&(d_pos), sizeof(long)*NSNPS);
hipMalloc((void**)&(d_chrom_c), sizeof(char)*NSNPS);
hipMalloc((void**)&d_name, sizeof(char*)*NSNPS);
hipMemcpy(d_pos, (h_snps.pos), sizeof(long)*NSNPS, hipMemcpyHostToDevice);
hipMemcpy(d_chrom_c, (h_snps.chrom_c), sizeof(char)*NSNPS, hipMemcpyHostToDevice);
hipMemcpy(d_chrom_c, (h_snps.chrom_c), sizeof(char)*NSNPS, hipMemcpyHostToDevice);
}
| 31edf7a5a7ed53001f3960f57ad6df570ca93d05.cu | #include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/resource.h>
typedef struct{
char** name;
char* chrom_c;
//int* chrom;
long* pos;
//long* c_pos;
char** rest;
}SNP;
typedef struct{
char* snp_name;
int* a_id; //length is the number of animals
char* ab1;
char* ab2;
int* ab;
}Sample;
int NSNPS;
int NSAMPLES;
__device__ void sort_by_bit(SNP* snps, Sample* samples, int bit);
__device__ long scan(long* x);
void read_files(char* map_path, char* snp_path, char** data_string, char** snps_data){
FILE *fd;
int err;
int num_lines = -1;
char** header_array;
int i;
/***********************Allocate string for header info**********/
printf("Allocating string for header array...\n");
header_array = (char**) malloc( 10 * sizeof(char*));
for(i = 0; i < 10; i++){
header_array[i] = (char*)malloc(100);
}
/*****************************************************************/
fd = fopen(snp_path, "r");
/*******Getting number of SNP and Sample from header****/
printf("Getting number of SNPs and Samples from header...\n");
do {
err = fscanf(fd, "%[^\n]\n", header_array[++num_lines]);
} while(err != EOF && num_lines < 10);
err = sscanf(header_array[5], "Total SNP %d", &NSNPS);
err = sscanf(header_array[7], "Total Sample %d", &NSAMPLES);
/***********************************************************/
/*************Getting Final Report Data***********************************/
printf("Getting final report data...\n");
//char** data_string;
data_string = (char**) malloc(NSNPS * NSAMPLES * sizeof(char*));
for(i = 0; i < NSNPS*NSAMPLES; i++){
data_string[i] = (char*)malloc(100);
}
num_lines = -1;
do {
err = fscanf(fd, "%[^\n]\n", data_string[++num_lines]);
} while(err != EOF && num_lines < NSNPS*NSAMPLES);
fclose(fd);
/**************************************************************************/
/************************Getting MapFile Data******************************/
printf("Getting mapfile data...\n");
//char** snps_data;
char* junk = (char*) malloc(50 * sizeof(char));
snps_data = (char**) malloc(NSNPS * sizeof(char*));
for(i = 0; i < NSNPS; i++){
snps_data[i] = (char*)malloc(100);
}
fd = fopen(map_path, "r");
int num_lines2 = -1;
err = fscanf(fd, "%[^\n]\n", junk);
do {
err = fscanf(fd, "%[^\n]\n", snps_data[++num_lines2]);
} while(err != EOF && num_lines2 < NSNPS);
free(junk);
fclose(fd);
/**************************************************************************/
}
/*************functions for the radix sort**********************************/
__device__ void radixsort(SNP* snps, Sample* samples){
for(int i = 0; i < 64; i++){
sort_by_bit(snps, samples, i);
__syncthreads();
}
}
__device__ void sort_by_bit(SNP* snps, Sample* samples, int bit){
int i = threadIdx.x;
int size = blockDim.x;
int index;
/***temporary variables for the snps*****/
long t_pos = snps->pos[i];
char* t_name = snps->name[i];
char t_chrom_c = snps->chrom_c[i];
//char* t_rest = snps->rest[i];
Sample t_sample = samples[i];
int p_i = (t_pos >> bit) & 1;
snps->pos[i] = p_i;
__syncthreads();
int ones_before = scan(snps->pos);
int ones_total = snps->pos[size -1];
int zeros_total = size - ones_total;
__syncthreads();
if(p_i)
index = ones_before - 1 + zeros_total;
else
index = i - ones_before;
snps->pos[index] = t_pos;
snps->name[index] = t_name;
snps->chrom_c[index] = t_chrom_c;
//snps->rest[index] = t_rest;
samples[index] = t_sample;
}
/**************************************************************************/
__device__ long scan(long* x){
int i = threadIdx.x;
int n = blockDim.x;
int offset;
for ( offset = 1; offset < n; offset *= 2){
long temp;
if (i >= offset)
temp = x[i-offset];
__syncthreads();
if(i >= offset)
x[i] = temp + x[i];
__syncthreads();
}
return x[i];
}
void parse(SNP* snps, Sample* animals, char** data_string, char** snp_data){
int i, j, err;
snps->name = (char**) malloc(NSNPS * sizeof(char*));
snps->chrom_c = (char*) malloc(NSNPS * sizeof(char));
snps->pos = (long*) malloc(NSNPS * sizeof(long));
for(i = 0; i < NSNPS; i++)
snps->name[i] = (char*) malloc(50 * sizeof(char));
animals = (Sample*) malloc(NSNPS * sizeof(Sample));
for(i = 0; i < NSNPS; i++){
animals[i].snp_name = (char*) malloc(50 * sizeof(char));
animals[i].a_id = (int*) malloc(NSAMPLES * sizeof(int));
animals[i].ab1 = (char*) malloc(NSAMPLES * sizeof(char));
animals[i].ab2 = (char*) malloc(NSAMPLES * sizeof(char));
animals[i].ab = (int*) malloc(NSAMPLES * sizeof(int));
}
for (i = 0; i < NSNPS; i++){
err = sscanf(snp_data[i], "%*d %s %c %ld %*s",
snps->name[i], &snps->chrom_c[i], &snps->pos[i]);
}
for(i = 0; i < NSNPS; i++){
for(j = 0; j < NSAMPLES; j++)
err = sscanf(data_string[i], "%s\t%d\t%*c\t%*c\t%*c\t%*c\t%c\t%c\t%*s",
animals[i].snp_name, &animals[i].a_id[j], &animals[i].ab1[j], &animals[i].ab2[j]);
}
}
__global__ void sort(SNP* snps, Sample* samples, int nsamples){
int id = threadIdx.x;
radixsort(snps, samples);
for(int i = 0; i < nsamples; i++){
if (samples[id].ab1[i] == 'A' && samples[id].ab2[i] == 'A'){
samples[id].ab[i] = 1;
}else if(samples[id].ab1[i] == 'B' && samples[id].ab2[i] == 'B'){
samples[id].ab[i] = 2;
}else{
samples[id].ab[i] = 3;
}
}
}
int main(int argc, char** argv){
printf("Begin.\n");
SNP h_snps;
/*
typedef struct{
char** name;
char* chrom_c;
//int* chrom;
long* pos;
//long* c_pos;
//char** rest;
}SNP;
*/
Sample* h_samples;
/*
typedef struct{
char* snp_name;
int* a_id; //length is the number of animals
char* ab1;
char* ab2;
int* ab;
}Sample;
*/
//char map_path[], snp_path[];
char** data_string, **snps_data;
char** d_name;
char* d_chrom_c;
long* d_pos;
printf("Reading files...\n");
//map_path = argv[1];
char map_path[] = "./sample-files/test-files/SNP_Map_Truncated.txt";
//snp_path = argv[2];
char snp_path[] = "./sample-files/test-files/FinalReport_Truncated.txt";
read_files(map_path, snp_path, data_string, snps_data);
printf("Files read.\nParsing...\n");
parse(&h_snps, h_samples, data_string, snps_data);
printf("Data parsed.\n");
free(data_string);
free(snps_data);
printf("Allocating CUDA memory...\n");
cudaMalloc((void**)&(d_pos), sizeof(long)*NSNPS);
cudaMalloc((void**)&(d_chrom_c), sizeof(char)*NSNPS);
cudaMalloc((void**)&d_name, sizeof(char*)*NSNPS);
cudaMemcpy(d_pos, (h_snps.pos), sizeof(long)*NSNPS, cudaMemcpyHostToDevice);
cudaMemcpy(d_chrom_c, (h_snps.chrom_c), sizeof(char)*NSNPS, cudaMemcpyHostToDevice);
cudaMemcpy(d_chrom_c, (h_snps.chrom_c), sizeof(char)*NSNPS, cudaMemcpyHostToDevice);
}
|
07495cad859cc5d2a02be420021ed0a654c09b7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "basic/math_function.h"
#include "device_launch_parameters.h"
namespace surfing
{
void surfing_gpu_memcpy(const size_t N, const void* X, void* Y)
{
if (X != Y)
{
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));
}
}
template<>
void surfing_gpu_asum<float>(hipblasHandle_t handle, int N, const float *X, int incx, float *result)
{
CUBLAS_CHECK(hipblasSasum(handle, N, X, incx, result));
}
template<>
void surfing_gpu_asum<double>(hipblasHandle_t handle, int N, const double *X, int incx, double *result)
{
CUBLAS_CHECK(hipblasDasum(handle, N, X, incx, result));
}
template<>
void surfing_gpu_nrm2<float>(hipblasHandle_t handle, int N, const float *X, int incx, float *result)
{
CUBLAS_CHECK(hipblasSnrm2(handle, N, X, incx, result));
}
template<>
void surfing_gpu_nrm2<double>(hipblasHandle_t handle, int N, const double *X, int incx, double *result)
{
CUBLAS_CHECK(hipblasDnrm2(handle, N, X, incx, result));
}
template<>
void surfing_gpu_dot<float>(hipblasHandle_t handle, int N, const float *X, int incx, const float* Y, int incy, float *result)
{
CUBLAS_CHECK(hipblasSdot(handle, N, X, incx,Y,incy,result));
}
template<>
void surfing_gpu_dot<double>(hipblasHandle_t handle, int N, const double *X, int incx, const double* Y, int incy, double *result)
{
CUBLAS_CHECK(hipblasDdot(handle, N, X, incx, Y, incy, result));
}
template<>
void surfing_gpu_gemm<float>(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n, int k,
const float *alpha, const float *A, int lda,
const float *B, int ldb,
const float *beta, float *C, int ldc)
{
CUBLAS_CHECK(hipblasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
template<>
void surfing_gpu_gemm<double>(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n, int k,
const double *alpha, const double *A, int lda,
const double *B, int ldb,
const double *beta, double *C, int ldc)
{
CUBLAS_CHECK(hipblasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
template<>
void surfing_gpu_geam<float>(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n,
const float *alpha, const float *A, int lda,
const float *beta, const float *B, int ldb,
float *C, int ldc)
{
CUBLAS_CHECK(hipblasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc));
}
template<>
void surfing_gpu_geam<double>(hipblasHandle_t handle, hipblasOperation_t transa,
hipblasOperation_t transb, int m, int n,
const double *alpha, const double *A, int lda,
const double *beta, const double *B, int ldb,
double *C, int ldc)
{
CUBLAS_CHECK(hipblasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc));
}
template <>
void surfing_gpu_gemv<float>(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n, const float* alpha,
const float* A, int lda,
const float* x, int incx,
const float* beta, float* y, int incy)
{
CUBLAS_CHECK(hipblasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy));
}
template <>
void surfing_gpu_gemv<double>(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n, const double* alpha,
const double* A, int lda,
const double* x, int incx,
const double* beta, double* y, int incy)
{
CUBLAS_CHECK(hipblasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy));
}
template<>
void surfing_gpu_max<float>(hipblasHandle_t handle, int n, const float* X, int incx, int* result)
{
CUBLAS_CHECK(hipblasIsamax(handle, n, X, incx, result));
}
template<>
void surfing_gpu_max<double>(hipblasHandle_t handle, int n, const double * X, int incx, int* result)
{
CUBLAS_CHECK(hipblasIdamax(handle, n, X, incx, result));
}
template<>
void surfing_gpu_axpy<float>(hipblasHandle_t handle, int N, const float* alpha,
const float* X, int incx, float* Y, int incy)
{
CUBLAS_CHECK(hipblasSaxpy(handle, N, alpha, X, incx, Y, incy));
}
template<>
void surfing_gpu_axpy<double>(hipblasHandle_t handle, int N, const double* alpha,
const double* X, int incx, double* Y, int incy)
{
CUBLAS_CHECK(hipblasDaxpy(handle, N, alpha, X, incx, Y, incy));
}
template<>
void surfing_gpu_scal<float>(hipblasHandle_t handle, int N, const float* alpha, float* X, int incx)
{
CUBLAS_CHECK(hipblasSscal(handle, N, alpha, X, incx));
}
template<>
void surfing_gpu_scal<double>(hipblasHandle_t handle, int N, const double* alpha, double* X, int incx)
{
CUBLAS_CHECK(hipblasDscal(handle, N, alpha, X, incx));
}
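// axpby computes Y <- alpha*X + beta*Y; it is implemented below as an
// in-place scal of Y followed by an axpy that accumulates alpha*X into it.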
template<>
void surfing_gpu_axpby<float>(hipblasHandle_t handle, int N, const float* alpha, const float* X, const float* beta, float* Y)
{
surfing_gpu_scal<float>(handle, N, beta, Y, 1);
surfing_gpu_axpy<float>(handle, N, alpha, X, 1, Y, 1);
}
template<>
void surfing_gpu_axpby <double>(hipblasHandle_t handle, int N, const double* alpha, const double* X, const double* beta, double* Y)
{
surfing_gpu_scal<double>(handle, N, beta, Y, 1);
surfing_gpu_axpy<double>(handle, N, alpha, X, 1, Y, 1);
}
template <typename Dtype>
__global__ void set_kernel(const int N, const Dtype alpha, Dtype* X)
{
CUDA_KERNEL_LOOP(index, N)
{
X[index] = alpha;
}
}
template <>
void surfing_gpu_set<float>(const int N, const float alpha, float *X)
{
hipLaunchKernelGGL(( set_kernel<float>) , dim3(SURFING_GET_BLOCK(N)), dim3(SURFING_CUDA_NUM_THREADS) , 0, 0, N, alpha, X);
}
template <>
void surfing_gpu_set(const int N, const double alpha, double *X)
{
hipLaunchKernelGGL(( set_kernel<double>) , dim3(SURFING_GET_BLOCK(N)), dim3(SURFING_CUDA_NUM_THREADS) , 0, 0, N, alpha, X);
}
template <typename Dtype>
__global__ void add_scalar_kernel(const int N, const Dtype alpha, Dtype* X)
{
CUDA_KERNEL_LOOP(index, N)
{
X[index] += alpha;
}
}
template <>
void surfing_gpu_add_scalar<float>(const int N, const float alpha, float *X)
{
hipLaunchKernelGGL(( add_scalar_kernel<float>) , dim3(SURFING_GET_BLOCK(N)), dim3(SURFING_CUDA_NUM_THREADS) , 0, 0, N, alpha, X);
}
template <>
void surfing_gpu_add_scalar<double>(const int N, const double alpha, double *X)
{
hipLaunchKernelGGL(( add_scalar_kernel<double>) , dim3(SURFING_GET_BLOCK(N)), dim3(SURFING_CUDA_NUM_THREADS) , 0, 0, N, alpha, X);
}
__global__ void rounding_kernel(const int N, unsigned int range, unsigned int * X)
{
CUDA_KERNEL_LOOP(index, N)
{
X[index] %= range;
}
}
void surfing_gpu_rounding(const int N, unsigned int range, unsigned int * X)
{
hipLaunchKernelGGL(( rounding_kernel), dim3(SURFING_GET_BLOCK(N)), dim3(SURFING_CUDA_NUM_THREADS) , 0, 0, N, range, X);
}
}
| 07495cad859cc5d2a02be420021ed0a654c09b7b.cu | #include "basic/math_function.h"
#include "device_launch_parameters.h"
namespace surfing
{
void surfing_gpu_memcpy(const size_t N, const void* X, void* Y)
{
if (X != Y)
{
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault));
}
}
template<>
void surfing_gpu_asum<float>(cublasHandle_t handle, int N, const float *X, int incx, float *result)
{
CUBLAS_CHECK(cublasSasum(handle, N, X, incx, result));
}
template<>
void surfing_gpu_asum<double>(cublasHandle_t handle, int N, const double *X, int incx, double *result)
{
CUBLAS_CHECK(cublasDasum(handle, N, X, incx, result));
}
template<>
void surfing_gpu_nrm2<float>(cublasHandle_t handle, int N, const float *X, int incx, float *result)
{
CUBLAS_CHECK(cublasSnrm2(handle, N, X, incx, result));
}
template<>
void surfing_gpu_nrm2<double>(cublasHandle_t handle, int N, const double *X, int incx, double *result)
{
CUBLAS_CHECK(cublasDnrm2(handle, N, X, incx, result));
}
template<>
void surfing_gpu_dot<float>(cublasHandle_t handle, int N, const float *X, int incx, const float* Y, int incy, float *result)
{
CUBLAS_CHECK(cublasSdot(handle, N, X, incx,Y,incy,result));
}
template<>
void surfing_gpu_dot<double>(cublasHandle_t handle, int N, const double *X, int incx, const double* Y, int incy, double *result)
{
CUBLAS_CHECK(cublasDdot(handle, N, X, incx, Y, incy, result));
}
template<>
void surfing_gpu_gemm<float>(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n, int k,
const float *alpha, const float *A, int lda,
const float *B, int ldb,
const float *beta, float *C, int ldc)
{
CUBLAS_CHECK(cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
template<>
void surfing_gpu_gemm<double>(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n, int k,
const double *alpha, const double *A, int lda,
const double *B, int ldb,
const double *beta, double *C, int ldc)
{
CUBLAS_CHECK(cublasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
template<>
void surfing_gpu_geam<float>(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n,
const float *alpha, const float *A, int lda,
const float *beta, const float *B, int ldb,
float *C, int ldc)
{
CUBLAS_CHECK(cublasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc));
}
template<>
void surfing_gpu_geam<double>(cublasHandle_t handle, cublasOperation_t transa,
cublasOperation_t transb, int m, int n,
const double *alpha, const double *A, int lda,
const double *beta, const double *B, int ldb,
double *C, int ldc)
{
CUBLAS_CHECK(cublasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc));
}
template <>
void surfing_gpu_gemv<float>(cublasHandle_t handle, cublasOperation_t trans,
int m, int n, const float* alpha,
const float* A, int lda,
const float* x, int incx,
const float* beta, float* y, int incy)
{
CUBLAS_CHECK(cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy));
}
template <>
void surfing_gpu_gemv<double>(cublasHandle_t handle, cublasOperation_t trans,
int m, int n, const double* alpha,
const double* A, int lda,
const double* x, int incx,
const double* beta, double* y, int incy)
{
CUBLAS_CHECK(cublasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy));
}
template<>
void surfing_gpu_max<float>(cublasHandle_t handle, int n, const float* X, int incx, int* result)
{
CUBLAS_CHECK(cublasIsamax(handle, n, X, incx, result));
}
template<>
void surfing_gpu_max<double>(cublasHandle_t handle, int n, const double * X, int incx, int* result)
{
CUBLAS_CHECK(cublasIdamax(handle, n, X, incx, result));
}
template<>
void surfing_gpu_axpy<float>(cublasHandle_t handle, int N, const float* alpha,
const float* X, int incx, float* Y, int incy)
{
CUBLAS_CHECK(cublasSaxpy(handle, N, alpha, X, incx, Y, incy));
}
template<>
void surfing_gpu_axpy<double>(cublasHandle_t handle, int N, const double* alpha,
const double* X, int incx, double* Y, int incy)
{
CUBLAS_CHECK(cublasDaxpy(handle, N, alpha, X, incx, Y, incy));
}
template<>
void surfing_gpu_scal<float>(cublasHandle_t handle, int N, const float* alpha, float* X, int incx)
{
CUBLAS_CHECK(cublasSscal(handle, N, alpha, X, incx));
}
template<>
void surfing_gpu_scal<double>(cublasHandle_t handle, int N, const double* alpha, double* X, int incx)
{
CUBLAS_CHECK(cublasDscal(handle, N, alpha, X, incx));
}
template<>
void surfing_gpu_axpby<float>(cublasHandle_t handle, int N, const float* alpha, const float* X, const float* beta, float* Y)
{
surfing_gpu_scal<float>(handle, N, beta, Y, 1);
surfing_gpu_axpy<float>(handle, N, alpha, X, 1, Y, 1);
}
template<>
void surfing_gpu_axpby <double>(cublasHandle_t handle, int N, const double* alpha, const double* X, const double* beta, double* Y)
{
surfing_gpu_scal<double>(handle, N, beta, Y, 1);
surfing_gpu_axpy<double>(handle, N, alpha, X, 1, Y, 1);
}
template <typename Dtype>
__global__ void set_kernel(const int N, const Dtype alpha, Dtype* X)
{
CUDA_KERNEL_LOOP(index, N)
{
X[index] = alpha;
}
}
template <>
void surfing_gpu_set<float>(const int N, const float alpha, float *X)
{
set_kernel<float> <<<SURFING_GET_BLOCK(N), SURFING_CUDA_NUM_THREADS >>>(N, alpha, X);
}
template <>
void surfing_gpu_set(const int N, const double alpha, double *X)
{
set_kernel<double> <<<SURFING_GET_BLOCK(N), SURFING_CUDA_NUM_THREADS >>>(N, alpha, X);
}
template <typename Dtype>
__global__ void add_scalar_kernel(const int N, const Dtype alpha, Dtype* X)
{
CUDA_KERNEL_LOOP(index, N)
{
X[index] += alpha;
}
}
template <>
void surfing_gpu_add_scalar<float>(const int N, const float alpha, float *X)
{
add_scalar_kernel<float> <<<SURFING_GET_BLOCK(N), SURFING_CUDA_NUM_THREADS >>>(N, alpha, X);
}
template <>
void surfing_gpu_add_scalar<double>(const int N, const double alpha, double *X)
{
add_scalar_kernel<double> <<<SURFING_GET_BLOCK(N), SURFING_CUDA_NUM_THREADS >>>(N, alpha, X);
}
__global__ void rounding_kernel(const int N, unsigned int range, unsigned int * X)
{
CUDA_KERNEL_LOOP(index, N)
{
X[index] %= range;
}
}
void surfing_gpu_rounding(const int N, unsigned int range, unsigned int * X)
{
rounding_kernel<<<SURFING_GET_BLOCK(N), SURFING_CUDA_NUM_THREADS >>>(N, range, X);
}
}
|
4f442996f3e026d583539ff49dfb396f70b98b81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
__global__ void sync_pool_back()
{ }
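// sync_pool_back is an intentionally empty kernel; launching it on the default
// stream can be used to enforce ordering after the stream_[0] launches (see
// the commented-out call at the end of Backward_gpu below).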
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = 0.;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask)
{
top_mask = top[1]->mutable_gpu_data();
}
else
{
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream_[0],
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
// I am considering adding my stream design in here.
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream_[0],
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS),0,stream_[0],
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS),0,stream_[0],
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
//sync_conv_groups<<<1, 1>>>();
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask)
{
top_mask = top[1]->gpu_data();
} else
{
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream_[0],
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream_[0],
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream_[0],
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
//sync_pool_back<<<1, 1>>>();
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
| 4f442996f3e026d583539ff49dfb396f70b98b81.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
__global__ void sync_pool_back()
{ }
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = 0.;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask)
{
top_mask = top[1]->mutable_gpu_data();
}
else
{
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,stream_[0]>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
    // Considering adding my stream design in here.
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,stream_[0]>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS,0,stream_[0]>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS,0,stream_[0]>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
//sync_conv_groups<<<1, 1>>>();
CUDA_POST_KERNEL_CHECK;
}
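// Max pooling backward: each bottom element visits every pooled output whose
// window could cover it and accumulates top_diff only where the recorded
// argmax (mask or top_mask) points back to this exact bottom location.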
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
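// Average pooling backward: each bottom element collects top_diff from every
// overlapping window, scaled by 1 / pool_size of that window (pool_size is
// computed with padding, matching the forward pass).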
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
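// Stochastic pooling backward: the gradient of each pooled output is routed
// only to the bottom element whose index was sampled and stored in rand_idx
// during the forward pass.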
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask)
{
top_mask = top[1]->gpu_data();
} else
{
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,stream_[0]>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,stream_[0]>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,stream_[0]>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
//sync_pool_back<<<1, 1>>>();
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
andromeda_nb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This program is a CUDA C program simulating the N-body system
* of two galaxies as PHY 241 FINAL PROJECTS
*
*/
/*
* TODO:(*for final project)
* 1. andromeda
* 2. report
* 3. presentation
 * *4. N-body galaxy code - generate 10^11 particles
 * *5. MATLAB - write a function to track the distance between the Milky Way and Andromeda
 * *6. change accel function to the N-body one.
 * *7. print mass[i], because the halo is dark matter. Or find a better way to distinguish dark matter and rings?
*/
#include <hip/hip_runtime.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define PI 3.14159265
#define BUFFERSIZE 500
#define BLOCKSIZE 256
#define G 1.0
#define MASS_1 38.2352941
#define RMIN (7.733/4.5)
#define SOFTPARAMETER 0.000001
#define AndromedaXOffsetP -41.0882
#define AndromedaYOffsetP 68.3823
#define AndromedaZOffsetP -33.8634
#define AndromedaXOffsetV 0.0420
#define AndromedaYOffsetV -0.2504
#define AndromedaZOffsetV 0.1240
#define MilkwayXOffsetP 41.0882
#define MilkwayYOffsetP -68.3823
#define MilkwayZOffsetP 33.8634
#define MilkwayXOffsetV -0.0420
#define MilkwayYOffsetV 0.2504
#define MilkwayZOffsetV -0.1240
// Headers
void rotate(double* x, double* y, double *z, double n1, double n2, double n3, double theta);
__global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt);
__global__ void accel(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double* mass, double dt);
__global__ void printstate(double *x, double *y, double *z, unsigned long tnow);
void initialCondition_host_file(char *input1, char *input2, double **x, double **y, double **z, double **vx, double **vy, double **vz, double **mass, unsigned long *size);
void read_size_from_file(char *input, unsigned long *size) ;
/** Main function **/
int main(int argc, char *argv[]) {
/*
* Handling commandline inputs and setting initial value of the arguments
* 1. number of steps (mstep)
 * 2. output interval (nout)
 * 3. offset (start printing position)
 * 4. time step (dt)
*
*/
unsigned long mstep, nout, offset, tnow = 0, n;
double dt, *x, *y, *z, *vx, *vy, *vz, *mass;
mstep = (argc > 1) ? atoi(argv[1]) : 100;
nout = (argc > 2) ? atoi(argv[2]) : 1;
offset = (argc > 3) ? atoi(argv[3]) : 0;
dt = (argc > 4) ? atof(argv[4]) : (2.0 * PI * RMIN * RMIN) / (sqrt(G * MASS_1) * 40.0);
initialCondition_host_file("milky_way.dat", "andromeda.dat", &x, &y, &z, &vx, &vy, &vz, &mass, &n);
unsigned long grids = ceil((double)n / BLOCKSIZE), threads = BLOCKSIZE;
/*
* Use hipDeviceSetLimit() to change the buffer size of printf
* used in kernel functions to solve the problem encountered before:
* cannot print more than 4096 lines of data using printf
*
*/
hipDeviceSetLimit(hipLimitPrintfFifoSize, n * BUFFERSIZE);
/* Start looping steps from first step to mstep */
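  /*
   * Each iteration below is a kick-drift-kick leapfrog update: accel() applies
   * a half-step velocity kick (0.5 * dt), leapstep() drifts the positions by a
   * full dt, and the second accel() applies the remaining half kick using the
   * forces evaluated at the new positions.
   */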
for (unsigned long i = 0; i < offset; i++, tnow++){
hipLaunchKernelGGL(( accel), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, mass, dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( leapstep), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( accel), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, mass, dt);
hipDeviceSynchronize();
}
for (unsigned long i = offset; i < mstep; i++, tnow++) {
if(i % nout == 0) {
hipLaunchKernelGGL(( printstate), dim3(grids), dim3(threads), 0, 0, x, y, z, tnow);
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( accel), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, mass, dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( leapstep), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( accel), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, mass, dt);
hipDeviceSynchronize();
}
if(mstep % nout == 0) {
hipLaunchKernelGGL(( printstate), dim3(grids), dim3(BLOCKSIZE), 0, 0, x, y, z, tnow);
}
hipDeviceSynchronize();
// After finishing, free the allocated memory
hipFree(x);
// Exit the current thread
return 0;
}
void rotate(double* x, double* y, double *z, double n1, double n2, double n3, double theta) {
double sigma = -theta;
double c = cos(sigma);
double s = sin(sigma);
double a = 1 - cos(sigma);
double tmpx = ( a * n1 * n1 + c ) * (*x) + ( a * n1 * n2 - s * n3 ) * (*y) + ( a * n1 * n3 + s * n2 ) * (*z);
double tmpy = ( a * n1 * n2 + s * n3 ) * (*x) + ( a * n2 * n2 + c ) * (*y) + ( a * n2 * n3 - s * n1 ) * (*z);
double tmpz = ( a * n1 * n3 - s * n2 ) * (*x) + ( a * n2 * n3 + s * n1 ) * (*y) + ( a * n3 * n3 + c ) * (*z);
(*x) = tmpx;
(*y) = tmpy;
(*z) = tmpz;
}
__global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt) {
const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x;
if (serial < n){
x[serial] += dt * vx[serial];
y[serial] += dt * vy[serial];
z[serial] += dt * vz[serial];
}
}
__global__ void accel(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double* mass, double dt) {
const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned long tdx = threadIdx.x;
__shared__ double lx[BLOCKSIZE];
__shared__ double ly[BLOCKSIZE];
__shared__ double lz[BLOCKSIZE];
__shared__ double lm[BLOCKSIZE];
double ax = 0.0, ay = 0.0, az = 0.0;
double norm;
double thisX, thisY, thisZ;
if (serial < n) {
thisX = x[serial];
thisY = y[serial];
thisZ = z[serial];
}
for (unsigned long i = 0; i < gridDim.x; i++) {
unsigned long index = i * blockDim.x + tdx;
if (index < n) {
// Copy data from main memory
            lx[tdx] = x[index];
            ly[tdx] = y[index];
            lz[tdx] = z[index];
lm[tdx] = mass[index];
}
__syncthreads();
// Accumulates the acceleration
#pragma unroll
for (unsigned long j = 0; j < BLOCKSIZE; j++) {
unsigned long pos = i * blockDim.x + j;
if (pos >= n) {
continue;
}
norm = pow(SOFTPARAMETER + pow(thisX - lx[j], 2) + pow(thisY - ly[j], 2) + pow(thisZ - lz[j], 2), 1.5);
ax += - G * lm[j] * (thisX - lx[j]) / norm;
ay += - G * lm[j] * (thisY - ly[j]) / norm;
az += - G * lm[j] * (thisZ - lz[j]) / norm;
}
__syncthreads();
}
if (serial < n) {
vx[serial] += 0.5 * dt * ax;
vy[serial] += 0.5 * dt * ay;
vz[serial] += 0.5 * dt * az;
}
}
__global__ void printstate(double *x, double *y, double *z, unsigned long tnow) {
const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x;
if(serial < 10000 || (serial >= 44000 && serial < 54000)){
printf("%d,%12.6lf,%12.6lf,%12.6lf,%d\n", serial, x[serial], y[serial], z[serial], tnow);
}
}
void initialCondition_host_file(char *input1, char *input2, double **x, double **y, double **z, double **vx, double **vy, double **vz, double **mass, unsigned long *size) {
unsigned long s1, s2;
read_size_from_file(input1, &s1);
(*size) = s1;
read_size_from_file(input2, &s2);
(*size) += s2;
unsigned long numOfBlocks = ceil(((double)(*size)) / BLOCKSIZE);
// Initial local data array
double *lx, *ly, *lz, *lvx, *lvy, *lvz, *lm;
lx = (double*) malloc(7 * numOfBlocks * BLOCKSIZE * sizeof(double));
ly = lx + numOfBlocks * BLOCKSIZE;
lz = ly + numOfBlocks * BLOCKSIZE;
lvx = lz + numOfBlocks * BLOCKSIZE;
lvy = lvx + numOfBlocks * BLOCKSIZE;
lvz = lvy + numOfBlocks * BLOCKSIZE;
lm = lvz + numOfBlocks * BLOCKSIZE;
// Read data from file1
FILE *fp = fopen(input1, "r");
if(fp == NULL){
printf("Error: fail to open file 1\n");
exit(-1);
}
unsigned long count = 0;
    // Skip the header line of the first galaxy file
unsigned long junk1;
double junk2;
fscanf(fp, "%lu %lf\n", &junk1, &junk2);
double omega = 0.0;
double sigma = PI / 2.0;
while((!feof(fp)) && (count < s1)){
fscanf(fp, "%lf %lf %lf %lf %lf %lf %lf\n", lm + count, lx + count, ly + count, lz + count, lvx + count, lvy + count, lvz + count);
rotate(lx + count, ly + count, lz + count, cos(omega), sin(omega), 0, sigma);
rotate(lvx + count, lvy + count, lvz + count, cos(omega), sin(omega), 0, sigma);
*(lx + count) += MilkwayXOffsetP;
*(ly + count) += MilkwayYOffsetP;
*(lz + count) += MilkwayZOffsetP;
*(lvx + count) += MilkwayXOffsetV;
*(lvy + count) += MilkwayYOffsetV;
*(lvz + count) += MilkwayZOffsetV;
count++;
}
fclose(fp);
// Read data from file2
fp = fopen(input2, "r");
if(fp == NULL){
printf("Error: fail to open file 2\n");
exit(-1);
}
// Skip first line
fscanf(fp, "%lu %lf\n", &junk1, &junk2);
omega = - 2.0 * PI / 3.0;
sigma = PI / 6.0;
while((!feof(fp)) && (count < (*size))){
fscanf(fp, "%lf %lf %lf %lf %lf %lf %lf\n", lm + count, lx + count, ly + count, lz + count, lvx + count, lvy + count, lvz + count);
rotate(lx + count, ly + count, lz + count, cos(omega), sin(omega), 0, sigma);
rotate(lvx + count, lvy + count, lvz + count, cos(omega), sin(omega), 0, sigma);
*(lx + count) += AndromedaXOffsetP;
*(ly + count) += AndromedaYOffsetP;
*(lz + count) += AndromedaZOffsetP;
*(lvx + count) += AndromedaXOffsetV;
*(lvy + count) += AndromedaYOffsetV;
*(lvz + count) += AndromedaZOffsetV;
count++;
}
fclose(fp);
// Allocate device memory
hipMalloc(x, 7 * numOfBlocks * BLOCKSIZE * sizeof(double));
(*y) = (*x) + numOfBlocks * BLOCKSIZE;
(*z) = (*y) + numOfBlocks * BLOCKSIZE;
(*vx) = (*z) + numOfBlocks * BLOCKSIZE;
(*vy) = (*vx) + numOfBlocks * BLOCKSIZE;
(*vz) = (*vy) + numOfBlocks * BLOCKSIZE;
(*mass) = (*vz) + numOfBlocks * BLOCKSIZE;
hipMemcpy((*x), lx, 7 * numOfBlocks * BLOCKSIZE * sizeof(double), hipMemcpyHostToDevice);
free(lx);
}
void read_size_from_file(char *input, unsigned long *size) {
FILE *fp = fopen(input, "r");
fscanf(fp, "%lu", size);
fclose(fp);
}
| andromeda_nb.cu | /*
* This program is a CUDA C program simulating the N-body system
* of two galaxies as PHY 241 FINAL PROJECTS
*
*/
/*
* TODO:(*for final project)
* 1. andromeda
* 2. report
* 3. presentation
 * *4. N-body galaxy code - generate 10^11 particles
 * *5. MATLAB - write a function to track the distance between the Milky Way and Andromeda
 * *6. change accel function to the N-body one.
 * *7. print mass[i], because the halo is dark matter. Or find a better way to distinguish dark matter and rings?
*/
#include <cuda.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#define PI 3.14159265
#define BUFFERSIZE 500
#define BLOCKSIZE 256
#define G 1.0
#define MASS_1 38.2352941
#define RMIN (7.733/4.5)
#define SOFTPARAMETER 0.000001
#define AndromedaXOffsetP -41.0882
#define AndromedaYOffsetP 68.3823
#define AndromedaZOffsetP -33.8634
#define AndromedaXOffsetV 0.0420
#define AndromedaYOffsetV -0.2504
#define AndromedaZOffsetV 0.1240
#define MilkwayXOffsetP 41.0882
#define MilkwayYOffsetP -68.3823
#define MilkwayZOffsetP 33.8634
#define MilkwayXOffsetV -0.0420
#define MilkwayYOffsetV 0.2504
#define MilkwayZOffsetV -0.1240
// Headers
void rotate(double* x, double* y, double *z, double n1, double n2, double n3, double theta);
__global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt);
__global__ void accel(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double* mass, double dt);
__global__ void printstate(double *x, double *y, double *z, unsigned long tnow);
void initialCondition_host_file(char *input1, char *input2, double **x, double **y, double **z, double **vx, double **vy, double **vz, double **mass, unsigned long *size);
void read_size_from_file(char *input, unsigned long *size) ;
/** Main function **/
int main(int argc, char *argv[]) {
/*
* Handling commandline inputs and setting initial value of the arguments
* 1. number of steps (mstep)
 * 2. output interval (nout)
 * 3. offset (start printing position)
 * 4. time step (dt)
*
*/
unsigned long mstep, nout, offset, tnow = 0, n;
double dt, *x, *y, *z, *vx, *vy, *vz, *mass;
mstep = (argc > 1) ? atoi(argv[1]) : 100;
nout = (argc > 2) ? atoi(argv[2]) : 1;
offset = (argc > 3) ? atoi(argv[3]) : 0;
dt = (argc > 4) ? atof(argv[4]) : (2.0 * PI * RMIN * RMIN) / (sqrt(G * MASS_1) * 40.0);
initialCondition_host_file("milky_way.dat", "andromeda.dat", &x, &y, &z, &vx, &vy, &vz, &mass, &n);
unsigned long grids = ceil((double)n / BLOCKSIZE), threads = BLOCKSIZE;
/*
* Use cudaDeviceSetLimit() to change the buffer size of printf
* used in kernel functions to solve the problem encountered before:
* cannot print more than 4096 lines of data using printf
*
*/
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, n * BUFFERSIZE);
/* Start looping steps from first step to mstep */
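  /*
   * Each iteration below is a kick-drift-kick leapfrog update: accel() applies
   * a half-step velocity kick (0.5 * dt), leapstep() drifts the positions by a
   * full dt, and the second accel() applies the remaining half kick using the
   * forces evaluated at the new positions.
   */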
for (unsigned long i = 0; i < offset; i++, tnow++){
accel<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, mass, dt);
cudaDeviceSynchronize();
leapstep<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, dt);
cudaDeviceSynchronize();
accel<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, mass, dt);
cudaDeviceSynchronize();
}
for (unsigned long i = offset; i < mstep; i++, tnow++) {
if(i % nout == 0) {
printstate<<<grids, threads>>> (x, y, z, tnow);
cudaDeviceSynchronize();
}
accel<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, mass, dt);
cudaDeviceSynchronize();
leapstep<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, dt);
cudaDeviceSynchronize();
accel<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, mass, dt);
cudaDeviceSynchronize();
}
if(mstep % nout == 0) {
printstate<<<grids, BLOCKSIZE>>>(x, y, z, tnow);
}
cudaDeviceSynchronize();
// After finishing, free the allocated memory
cudaFree(x);
// Exit the current thread
return 0;
}
void rotate(double* x, double* y, double *z, double n1, double n2, double n3, double theta) {
double sigma = -theta;
double c = cos(sigma);
double s = sin(sigma);
double a = 1 - cos(sigma);
double tmpx = ( a * n1 * n1 + c ) * (*x) + ( a * n1 * n2 - s * n3 ) * (*y) + ( a * n1 * n3 + s * n2 ) * (*z);
double tmpy = ( a * n1 * n2 + s * n3 ) * (*x) + ( a * n2 * n2 + c ) * (*y) + ( a * n2 * n3 - s * n1 ) * (*z);
double tmpz = ( a * n1 * n3 - s * n2 ) * (*x) + ( a * n2 * n3 + s * n1 ) * (*y) + ( a * n3 * n3 + c ) * (*z);
(*x) = tmpx;
(*y) = tmpy;
(*z) = tmpz;
}
__global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt) {
const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x;
if (serial < n){
x[serial] += dt * vx[serial];
y[serial] += dt * vy[serial];
z[serial] += dt * vz[serial];
}
}
__global__ void accel(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double* mass, double dt) {
const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned long tdx = threadIdx.x;
__shared__ double lx[BLOCKSIZE];
__shared__ double ly[BLOCKSIZE];
__shared__ double lz[BLOCKSIZE];
__shared__ double lm[BLOCKSIZE];
double ax = 0.0, ay = 0.0, az = 0.0;
double norm;
double thisX, thisY, thisZ;
if (serial < n) {
thisX = x[serial];
thisY = y[serial];
thisZ = z[serial];
}
for (unsigned long i = 0; i < gridDim.x; i++) {
unsigned long index = i * blockDim.x + tdx;
if (index < n) {
// Copy data from main memory
            lx[tdx] = x[index];
            ly[tdx] = y[index];
            lz[tdx] = z[index];
lm[tdx] = mass[index];
}
__syncthreads();
// Accumulates the acceleration
#pragma unroll
for (unsigned long j = 0; j < BLOCKSIZE; j++) {
unsigned long pos = i * blockDim.x + j;
if (pos >= n) {
continue;
}
norm = pow(SOFTPARAMETER + pow(thisX - lx[j], 2) + pow(thisY - ly[j], 2) + pow(thisZ - lz[j], 2), 1.5);
ax += - G * lm[j] * (thisX - lx[j]) / norm;
ay += - G * lm[j] * (thisY - ly[j]) / norm;
az += - G * lm[j] * (thisZ - lz[j]) / norm;
}
__syncthreads();
}
if (serial < n) {
vx[serial] += 0.5 * dt * ax;
vy[serial] += 0.5 * dt * ay;
vz[serial] += 0.5 * dt * az;
}
}
__global__ void printstate(double *x, double *y, double *z, unsigned long tnow) {
const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x;
if(serial < 10000 || (serial >= 44000 && serial < 54000)){
printf("%d,%12.6lf,%12.6lf,%12.6lf,%d\n", serial, x[serial], y[serial], z[serial], tnow);
}
}
void initialCondition_host_file(char *input1, char *input2, double **x, double **y, double **z, double **vx, double **vy, double **vz, double **mass, unsigned long *size) {
unsigned long s1, s2;
read_size_from_file(input1, &s1);
(*size) = s1;
read_size_from_file(input2, &s2);
(*size) += s2;
unsigned long numOfBlocks = ceil(((double)(*size)) / BLOCKSIZE);
// Initial local data array
double *lx, *ly, *lz, *lvx, *lvy, *lvz, *lm;
lx = (double*) malloc(7 * numOfBlocks * BLOCKSIZE * sizeof(double));
ly = lx + numOfBlocks * BLOCKSIZE;
lz = ly + numOfBlocks * BLOCKSIZE;
lvx = lz + numOfBlocks * BLOCKSIZE;
lvy = lvx + numOfBlocks * BLOCKSIZE;
lvz = lvy + numOfBlocks * BLOCKSIZE;
lm = lvz + numOfBlocks * BLOCKSIZE;
// Read data from file1
FILE *fp = fopen(input1, "r");
if(fp == NULL){
printf("Error: fail to open file 1\n");
exit(-1);
}
unsigned long count = 0;
    // Skip the header line of the first galaxy file
unsigned long junk1;
double junk2;
fscanf(fp, "%lu %lf\n", &junk1, &junk2);
double omega = 0.0;
double sigma = PI / 2.0;
while((!feof(fp)) && (count < s1)){
fscanf(fp, "%lf %lf %lf %lf %lf %lf %lf\n", lm + count, lx + count, ly + count, lz + count, lvx + count, lvy + count, lvz + count);
rotate(lx + count, ly + count, lz + count, cos(omega), sin(omega), 0, sigma);
rotate(lvx + count, lvy + count, lvz + count, cos(omega), sin(omega), 0, sigma);
*(lx + count) += MilkwayXOffsetP;
*(ly + count) += MilkwayYOffsetP;
*(lz + count) += MilkwayZOffsetP;
*(lvx + count) += MilkwayXOffsetV;
*(lvy + count) += MilkwayYOffsetV;
*(lvz + count) += MilkwayZOffsetV;
count++;
}
fclose(fp);
// Read data from file2
fp = fopen(input2, "r");
if(fp == NULL){
printf("Error: fail to open file 2\n");
exit(-1);
}
// Skip first line
fscanf(fp, "%lu %lf\n", &junk1, &junk2);
omega = - 2.0 * PI / 3.0;
sigma = PI / 6.0;
while((!feof(fp)) && (count < (*size))){
fscanf(fp, "%lf %lf %lf %lf %lf %lf %lf\n", lm + count, lx + count, ly + count, lz + count, lvx + count, lvy + count, lvz + count);
rotate(lx + count, ly + count, lz + count, cos(omega), sin(omega), 0, sigma);
rotate(lvx + count, lvy + count, lvz + count, cos(omega), sin(omega), 0, sigma);
*(lx + count) += AndromedaXOffsetP;
*(ly + count) += AndromedaYOffsetP;
*(lz + count) += AndromedaZOffsetP;
*(lvx + count) += AndromedaXOffsetV;
*(lvy + count) += AndromedaYOffsetV;
*(lvz + count) += AndromedaZOffsetV;
count++;
}
fclose(fp);
// Allocate device memory
cudaMalloc(x, 7 * numOfBlocks * BLOCKSIZE * sizeof(double));
(*y) = (*x) + numOfBlocks * BLOCKSIZE;
(*z) = (*y) + numOfBlocks * BLOCKSIZE;
(*vx) = (*z) + numOfBlocks * BLOCKSIZE;
(*vy) = (*vx) + numOfBlocks * BLOCKSIZE;
(*vz) = (*vy) + numOfBlocks * BLOCKSIZE;
(*mass) = (*vz) + numOfBlocks * BLOCKSIZE;
cudaMemcpy((*x), lx, 7 * numOfBlocks * BLOCKSIZE * sizeof(double), cudaMemcpyHostToDevice);
free(lx);
}
void read_size_from_file(char *input, unsigned long *size) {
FILE *fp = fopen(input, "r");
fscanf(fp, "%lu", size);
fclose(fp);
}
|
e9aaf4fbf7f76806e89886eeb653753247803423.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define MAXBLOCKS 10
#define MAXTHREADS 1
//__global__ (parallelized method)
__global__ void VectorAdd(int *a, int *b, int*c, int n)
{
int i = blockIdx.x; //Assign each c element to a single block
c[i] = a[i] + b[i];
}
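// Note: with MAXTHREADS == 1 each block computes exactly one output element,
// so the block index doubles as the element index. A more typical mapping
// would be i = blockIdx.x * blockDim.x + threadIdx.x with a bounds check
// against n.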
int main()
{
int *a, *b, *c; //CPU
int *d_a, *d_b, *d_c;//GPU
//Allocate CPU memory
a = (int*)malloc(MAXBLOCKS*sizeof(int));
b = (int*)malloc(MAXBLOCKS*sizeof(int));
c = (int*)malloc(MAXBLOCKS*sizeof(int));
//Allocate GPU memory
hipMalloc(&d_a, MAXBLOCKS*sizeof(int));
hipMalloc(&d_b, MAXBLOCKS*sizeof(int));
hipMalloc(&d_c, MAXBLOCKS*sizeof(int));
for (int i = 0; i < MAXBLOCKS; ++i) //Populate array
{
a[i] = i;
b[i] = i;
c[i] = 0;
}
//Copy data to GPU
hipMemcpy(d_a, a, MAXBLOCKS*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, MAXBLOCKS*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, MAXBLOCKS*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( VectorAdd), dim3(MAXBLOCKS), dim3(MAXTHREADS) , 0, 0, d_a, d_b, d_c, MAXBLOCKS); //Run GPU using MAXBLOCK number of blocks and MAXTHREADS number of threads
//Copy result back to CPU
hipMemcpy(c, d_c, MAXBLOCKS*sizeof(int), hipMemcpyDeviceToHost);
printf("\nMAXBLOCKS (%d) VECTOR ADDITION USING CUDA\n\n", MAXBLOCKS);
printf("c[i] = a[i] + b[i]\n");
printf("======================================\n");
for (int i = 0; i < MAXBLOCKS; ++i)
printf("a[%d] = %d, b[%d] = %d, c[%d] = %d\n", i, a[i], i, b[i], i, c[i]);
//Free CPU memory
free(a);
free(b);
free(c);
//Free GPU memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
} | e9aaf4fbf7f76806e89886eeb653753247803423.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define MAXBLOCKS 10
#define MAXTHREADS 1
//__global__ (parallelized method)
__global__ void VectorAdd(int *a, int *b, int*c, int n)
{
int i = blockIdx.x; //Assign each c element to a single block
c[i] = a[i] + b[i];
}
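// Note: with MAXTHREADS == 1 each block computes exactly one output element,
// so the block index doubles as the element index. A more typical mapping
// would be i = blockIdx.x * blockDim.x + threadIdx.x with a bounds check
// against n.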
int main()
{
int *a, *b, *c; //CPU
int *d_a, *d_b, *d_c;//GPU
//Allocate CPU memory
a = (int*)malloc(MAXBLOCKS*sizeof(int));
b = (int*)malloc(MAXBLOCKS*sizeof(int));
c = (int*)malloc(MAXBLOCKS*sizeof(int));
//Allocate GPU memory
cudaMalloc(&d_a, MAXBLOCKS*sizeof(int));
cudaMalloc(&d_b, MAXBLOCKS*sizeof(int));
cudaMalloc(&d_c, MAXBLOCKS*sizeof(int));
for (int i = 0; i < MAXBLOCKS; ++i) //Populate array
{
a[i] = i;
b[i] = i;
c[i] = 0;
}
//Copy data to GPU
cudaMemcpy(d_a, a, MAXBLOCKS*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, MAXBLOCKS*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, MAXBLOCKS*sizeof(int), cudaMemcpyHostToDevice);
VectorAdd<<< MAXBLOCKS, MAXTHREADS >>>(d_a, d_b, d_c, MAXBLOCKS); //Run GPU using MAXBLOCK number of blocks and MAXTHREADS number of threads
//Copy result back to CPU
cudaMemcpy(c, d_c, MAXBLOCKS*sizeof(int), cudaMemcpyDeviceToHost);
printf("\nMAXBLOCKS (%d) VECTOR ADDITION USING CUDA\n\n", MAXBLOCKS);
printf("c[i] = a[i] + b[i]\n");
printf("======================================\n");
for (int i = 0; i < MAXBLOCKS; ++i)
printf("a[%d] = %d, b[%d] = %d, c[%d] = %d\n", i, a[i], i, b[i], i, c[i]);
//Free CPU memory
free(a);
free(b);
free(c);
//Free GPU memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
} |
c4f79bb90cb33885b2a5104d0aeeabc33536e37d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void hello_cuda()
{
printf("Hello CUDA \n");
}
int main()
{
int nx,ny;
nx=16;
ny=4;
dim3 block(8,2,1);
dim3 grid(nx/block.x,ny/block.y);
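    // grid = (16/8, 4/2) = (2,2) blocks of (8,2) threads each,
    // so 2*2*8*2 = 64 threads print "Hello CUDA" in total.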
hipLaunchKernelGGL(( hello_cuda), dim3(grid),dim3(block), 0, 0, );
hipDeviceSynchronize();
hipDeviceReset();
return 0;
} | c4f79bb90cb33885b2a5104d0aeeabc33536e37d.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void hello_cuda()
{
printf("Hello CUDA \n");
}
int main()
{
int nx,ny;
nx=16;
ny=4;
dim3 block(8,2,1);
dim3 grid(nx/block.x,ny/block.y);
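    // grid = (16/8, 4/2) = (2,2) blocks of (8,2) threads each,
    // so 2*2*8*2 = 64 threads print "Hello CUDA" in total.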
hello_cuda<<<grid,block>>>();
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
} |
22d0d26c378ce9c76af567a992917d85a705f441.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/nvbench.cuh>
// Grab some testing kernels from NVBench:
#include <nvbench/test_kernels.cuh>
// Thrust simplifies memory management, etc:
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
// mod2_inplace performs an in-place mod2 over every element in `data`. `data`
// is reset to `input` each iteration. A manual timer is requested by passing
// `nvbench::exec_tag::timer` to `state.exec(...)`, which is used to only time
// the mod2, and not the reset.
//
// Note that this disables the batch timings, since the reset phase will throw
// off the batch results.
void mod2_inplace(nvbench::state &state)
{
// Allocate input data:
const std::size_t num_values = 64 * 1024 * 1024 / sizeof(nvbench::int32_t);
thrust::device_vector<nvbench::int32_t> input(num_values);
thrust::sequence(input.begin(), input.end());
// Working data buffer:
thrust::device_vector<nvbench::int32_t> data(num_values);
// Provide throughput information:
state.add_element_count(num_values);
state.add_global_memory_reads<nvbench::int32_t>(num_values);
state.add_global_memory_writes<nvbench::int32_t>(num_values);
// Request timer with `nvbench::exec_tag::timer`:
state.exec(nvbench::exec_tag::timer,
// Lambda now takes a `timer` argument:
[&input, &data, num_values](nvbench::launch &launch, auto &timer) {
// Reset working data:
thrust::copy(thrust::device.on(launch.get_stream()),
input.cbegin(),
input.cend(),
data.begin());
// Start timer:
timer.start();
// Run kernel of interest:
hipLaunchKernelGGL(( nvbench::mod2_kernel), dim3(256), dim3(256), 0, launch.get_stream(),
                 thrust::raw_pointer_cast(data.data()),
                 thrust::raw_pointer_cast(data.data()),
num_values);
// Stop timer:
timer.stop();
});
}
NVBENCH_BENCH(mod2_inplace);
| 22d0d26c378ce9c76af567a992917d85a705f441.cu | /*
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/nvbench.cuh>
// Grab some testing kernels from NVBench:
#include <nvbench/test_kernels.cuh>
// Thrust simplifies memory management, etc:
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
// mod2_inplace performs an in-place mod2 over every element in `data`. `data`
// is reset to `input` each iteration. A manual timer is requested by passing
// `nvbench::exec_tag::timer` to `state.exec(...)`, which is used to only time
// the mod2, and not the reset.
//
// Note that this disables the batch timings, since the reset phase will throw
// off the batch results.
void mod2_inplace(nvbench::state &state)
{
// Allocate input data:
const std::size_t num_values = 64 * 1024 * 1024 / sizeof(nvbench::int32_t);
thrust::device_vector<nvbench::int32_t> input(num_values);
thrust::sequence(input.begin(), input.end());
// Working data buffer:
thrust::device_vector<nvbench::int32_t> data(num_values);
// Provide throughput information:
state.add_element_count(num_values);
state.add_global_memory_reads<nvbench::int32_t>(num_values);
state.add_global_memory_writes<nvbench::int32_t>(num_values);
// Request timer with `nvbench::exec_tag::timer`:
state.exec(nvbench::exec_tag::timer,
// Lambda now takes a `timer` argument:
[&input, &data, num_values](nvbench::launch &launch, auto &timer) {
// Reset working data:
thrust::copy(thrust::device.on(launch.get_stream()),
input.cbegin(),
input.cend(),
data.begin());
// Start timer:
timer.start();
// Run kernel of interest:
nvbench::mod2_kernel<<<256, 256, 0, launch.get_stream()>>>(
                 thrust::raw_pointer_cast(data.data()),
                 thrust::raw_pointer_cast(data.data()),
num_values);
// Stop timer:
timer.stop();
});
}
NVBENCH_BENCH(mod2_inplace);
|
ced3b7ae9c9a67e7b6f9dc165be3b686ad4beca7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/gpu_memory.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
// These defines serve a single purpose: to keep sane C++ formatting
// in the presence of the <80 characters rule
#define cudnnConvFwd cudnnConvolutionForward
#define cudnnConvBwdBias cudnnConvolutionBackwardBias
#define cudnnConvBwdFilter cudnnConvolutionBackwardFilter
#define cudnnConvBwdData cudnnConvolutionBackwardData
namespace caffe {
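// sync_conv_groups is an empty kernel; launching it into the null stream was
// one way to order work submitted to the per-group streams. It is not launched
// below, where the legacy default stream is synchronized directly instead.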
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::
Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Test free space and force reshape if allocations have changed
size_t workspace_limit_bytes, total_memory;
gpu_memory::getInfo(&workspace_limit_bytes, &total_memory);
if (workspace_fwd_sizes_[i] > workspace_limit_bytes) {
this->Reshape(bottom, top);
}
// !!!! Not safe if group_ > 1 !!!!
workspace.reserve(workspace_fwd_sizes_[i]);
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvFwd(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
bottom_descs_[i],
bottom_data + bottom_offset_ * g,
filter_desc_,
weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i],
workspace.data(),
workspace.size(),
cudnn::dataType<Dtype>::zero,
top_descs_[i],
top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
bias_desc_,
bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i],
top_data + top_offset_ * g));
}
}
workspace.release();
    // Synchronize the work across groups, each of which went into its own
    // stream, by synchronizing the legacy default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(hipStreamSynchronize(cudaStreamLegacy));
}
}
template <typename Dtype>
void
CuDNNConvolutionLayer<Dtype>::
Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Test free space and force reshape if allocations have changed
size_t workspace_limit_bytes, total_memory;
gpu_memory::getInfo(&workspace_limit_bytes, &total_memory);
if (workspace_bwd_filter_sizes_[i] > workspace_limit_bytes ||
workspace_bwd_data_sizes_[i] > workspace_limit_bytes) {
this->Reshape(bottom, top);
}
// To remove pressure on allocator, allocate the larger of the
// workspaces needed for the following steps
size_t workspace_reserve = workspace_bwd_filter_sizes_[i] >
workspace_bwd_data_sizes_[i] ?
workspace_bwd_filter_sizes_[i] : workspace_bwd_data_sizes_[i];
// !!!! Not safe if group_ > 1 !!!!
workspace.reserve(workspace_reserve);
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvBwdBias(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
top_descs_[i],
top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_,
bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvBwdFilter(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
bottom_descs_[i],
bottom_data + bottom_offset_ * g,
top_descs_[i],
top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i],
workspace.data(),
workspace.size(),
cudnn::dataType<Dtype>::one,
filter_desc_,
weight_diff + weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvBwdData(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
filter_desc_,
weight + this->weight_offset_ * g,
top_descs_[i],
top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i],
workspace.data(),
workspace.size(),
cudnn::dataType<Dtype>::zero,
bottom_descs_[i],
bottom_diff + bottom_offset_ * g));
}
}
workspace.release();
    // Synchronize the work across groups, each of which went into its own
    // stream, by synchronizing the legacy default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(hipStreamSynchronize(cudaStreamLegacy));
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| ced3b7ae9c9a67e7b6f9dc165be3b686ad4beca7.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/gpu_memory.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
// These defines serve a single purpose: to keep sane C++ formatting
// in the presence of the <80 characters rule
#define cudnnConvFwd cudnnConvolutionForward
#define cudnnConvBwdBias cudnnConvolutionBackwardBias
#define cudnnConvBwdFilter cudnnConvolutionBackwardFilter
#define cudnnConvBwdData cudnnConvolutionBackwardData
namespace caffe {
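// sync_conv_groups is an empty kernel; launching it into the null stream was
// one way to order work submitted to the per-group streams. It is not launched
// below, where the legacy default stream is synchronized directly instead.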
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::
Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Test free space and force reshape if allocations have changed
size_t workspace_limit_bytes, total_memory;
gpu_memory::getInfo(&workspace_limit_bytes, &total_memory);
if (workspace_fwd_sizes_[i] > workspace_limit_bytes) {
this->Reshape(bottom, top);
}
// !!!! Not safe if group_ > 1 !!!!
workspace.reserve(workspace_fwd_sizes_[i]);
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvFwd(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
bottom_descs_[i],
bottom_data + bottom_offset_ * g,
filter_desc_,
weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i],
workspace.data(),
workspace.size(),
cudnn::dataType<Dtype>::zero,
top_descs_[i],
top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
bias_desc_,
bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i],
top_data + top_offset_ * g));
}
}
workspace.release();
    // Synchronize the work across groups, each of which went into its own
    // stream, by synchronizing the legacy default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(cudaStreamSynchronize(cudaStreamLegacy));
}
}
template <typename Dtype>
void
CuDNNConvolutionLayer<Dtype>::
Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Test free space and force reshape if allocations have changed
size_t workspace_limit_bytes, total_memory;
gpu_memory::getInfo(&workspace_limit_bytes, &total_memory);
if (workspace_bwd_filter_sizes_[i] > workspace_limit_bytes ||
workspace_bwd_data_sizes_[i] > workspace_limit_bytes) {
this->Reshape(bottom, top);
}
// To remove pressure on allocator, allocate the larger of the
// workspaces needed for the following steps
size_t workspace_reserve = workspace_bwd_filter_sizes_[i] >
workspace_bwd_data_sizes_[i] ?
workspace_bwd_filter_sizes_[i] : workspace_bwd_data_sizes_[i];
// !!!! Not safe if group_ > 1 !!!!
workspace.reserve(workspace_reserve);
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvBwdBias(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
top_descs_[i],
top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_,
bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvBwdFilter(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
bottom_descs_[i],
bottom_data + bottom_offset_ * g,
top_descs_[i],
top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i],
workspace.data(),
workspace.size(),
cudnn::dataType<Dtype>::one,
filter_desc_,
weight_diff + weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvBwdData(Caffe::cudnn_handle(),
cudnn::dataType<Dtype>::one,
filter_desc_,
weight + this->weight_offset_ * g,
top_descs_[i],
top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i],
workspace.data(),
workspace.size(),
cudnn::dataType<Dtype>::zero,
bottom_descs_[i],
bottom_diff + bottom_offset_ * g));
}
}
workspace.release();
    // Synchronize the work across groups, each of which went into its own
    // stream, by synchronizing the legacy default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(cudaStreamSynchronize(cudaStreamLegacy));
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
de2bcd2f63e077ed88fafd271deb228af38f6354.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file unstagger.cpp
*
* Calculate the co-located velocity field for U and V
*
Description:
The grid in a staggered arrangement consists of central points ('o') where most parameter values
e.g. temperature and pressure are stored. The u-velocity points ('u') are shifted east by a half
grid spacing and v-velocity points ('v') shifted south by a half grid spacing. A example on a 3x3
grid is given below.
o---u---o---u---o---u
| | |
v v v
| | |
o---u---o---u---o---u
| | |
v v v
| | |
o---u---o---u---o---u
| | |
v v v
If collocation of all parameters is required, u- and v-velocity needs to be unstaggered. The
unstaggering is done by solving a set of linear equations. For the example grid shown above
the linear equations are.
Case u: Case v:
1*u(1,1) = o(1,1) 1*v(1,1) = o(1,1)
0.5*u(1,1) + 0.5*u(1,2) = o(1,2) 1*v(1,2) = o(1,2)
0.5*u(1,2) + 0.5*u(1,3) = o(1,3) 1*v(1,3) = o(1,3)
1*u(2,1) = o(2,1) 0.5*v(1,1) + 0.5*v(2,1) = o(2,1)
0.5*u(2,1) + 0.5*u(2,2) = o(2,2) 0.5*v(1,2) + 0.5*v(2,2) = o(2,2)
0.5*u(2,2) + 0.5*u(2,3) = o(2,3) 0.5*v(1,3) + 0.5*v(2,3) = o(2,3)
1*u(3,1) = o(3,1) 0.5*v(2,1) + 0.5*v(3,1) = o(3,1)
0.5*u(3,1) + 0.5*u(3,2) = o(3,2) 0.5*v(2,2) + 0.5*v(3,2) = o(3,2)
0.5*u(3,2) + 0.5*u(3,3) = o(3,3) 0.5*v(2,3) + 0.5*v(3,3) = o(3,3)
These equations can be re-written in matrix-vector form
|- -|
| u(1,1) |
| . |
| . |
| u(3,3) |
|- -|
|- -| |- -|
| w11 ... w19 | | o(1,1) |
| . . | | . |
| . . | | . |
| w91 ... w99 | | o(3,3) |
|- -| |- -|
where the weighting matrices for the u- and v-case are diagonal matrices.
The diagonal matrix to unstagger the u-velocity field has the form
| 2 0 0 0 0 0 0 0 0 |
| 1 1 0 0 0 0 0 0 0 |
| 0 1 1 0 0 0 0 0 0 |
| 0 0 0 2 0 0 0 0 0 |
1/2 * | 0 0 0 1 1 0 0 0 0 | = U_unstagger
| 0 0 0 0 1 1 0 0 0 |
| 0 0 0 0 0 0 2 0 0 |
| 0 0 0 0 0 0 1 1 0 |
| 0 0 0 0 0 0 0 1 1 |
and the diagonal matrix to unstagger the v-velocity
| 2 0 0 0 0 0 0 0 0 |
| 0 2 0 0 0 0 0 0 0 |
| 0 0 2 0 0 0 0 0 0 |
| 1 0 0 1 0 0 0 0 0 |
1/2 * | 0 1 0 0 1 0 0 0 0 | = V_unstagger
| 0 0 1 0 0 1 0 0 0 |
| 0 0 0 1 0 0 1 0 0 |
| 0 0 0 0 1 0 0 1 0 |
| 0 0 0 0 0 1 0 0 1 |
In this form the problem can be solved efficiently using a sparse matrix library
matrix-vector multiplication. In this implementation CUSP is used for that purpose.
**/
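/*
 * Worked example (hypothetical values): for one row of the 3x3 grid with
 * staggered u-velocities u = [2, 4, 6], applying U_unstagger gives
 * [1*2, 0.5*2 + 0.5*4, 0.5*4 + 0.5*6] = [2, 3, 5]; the first column is copied
 * as-is and every other point is the average of its two neighbouring u-points.
 */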
#include "cuda_plugin_helper.h"
#include "unstagger.cuh"
#ifdef DEBUG
#undef DEBUG
#include <cusp/array1d.h>
#include <cusp/array2d.h>
#include <cusp/dia_matrix.h>
#include <cusp/multiply.h>
#include <thrust/system/hip/execution_policy.h>
#define DEBUG
#else
#include <cusp/array1d.h>
#include <cusp/array2d.h>
#include <cusp/dia_matrix.h>
#include <cusp/multiply.h>
#include <thrust/system/hip/execution_policy.h>
#endif
cusp::dia_matrix<int, double, cusp::device_memory> U_unstagger;
cusp::dia_matrix<int, double, cusp::device_memory> V_unstagger;
void himan::plugin::unstagger_cuda::Init(size_t NX, size_t NY)
{
	// create a diagonal matrix with constant coefficients
size_t N = NX * NY;
cusp::dia_matrix<int, double, cusp::host_memory> h_U_unstagger(N, N, 2 * N, 2);
cusp::dia_matrix<int, double, cusp::host_memory> h_V_unstagger(N, N, 2 * N, 2);
cusp::array2d<double, cusp::device_memory> Diags(N, 2, 0.5);
h_U_unstagger.diagonal_offsets[0] = 0;
h_U_unstagger.diagonal_offsets[1] = -1;
h_U_unstagger.values = Diags;
// alter coefficient for interpolation of first column in U
for (size_t i = 0; i < NY; ++i)
{
h_U_unstagger.values(i * NX, 0) = 1.0;
h_U_unstagger.values(i * NX, 1) = 0.0;
}
h_V_unstagger.diagonal_offsets[0] = 0;
h_V_unstagger.diagonal_offsets[1] = -NX;
h_V_unstagger.values = Diags;
// alter coefficient for interpolation of first row in V
for (size_t i = 0; i < NX; ++i)
{
h_V_unstagger.values(i, 0) = 1.0;
h_V_unstagger.values(i, 1) = 0.0;
}
// copy matrices to device
U_unstagger = h_U_unstagger;
V_unstagger = h_V_unstagger;
}
std::pair<std::vector<double>, std::vector<double>> himan::plugin::unstagger_cuda::Process(std::vector<double>& U_in,
std::vector<double>& V_in)
{
size_t N = U_in.size();
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
std::vector<double> U_out(N);
std::vector<double> V_out(N);
CUDA_CHECK(hipHostRegister(U_in.data(), sizeof(double) * N, 0));
CUDA_CHECK(hipHostRegister(V_in.data(), sizeof(double) * N, 0));
CUDA_CHECK(hipHostRegister(U_out.data(), sizeof(double) * N, 0));
CUDA_CHECK(hipHostRegister(V_out.data(), sizeof(double) * N, 0));
// create 1d arrays on device
double* d_U = nullptr; // pointer to device memory pointing to incoming data of U
double* d_V = nullptr; // pointer to device memory pointing to incoming data of V
double* d_U_out = nullptr; // pointer to device memory to unstaggered data of U
double* d_V_out = nullptr; // pointer to device memory to unstaggered data of V
// allocate memory
CUDA_CHECK(hipMalloc((void**)&d_U, sizeof(double) * N));
CUDA_CHECK(hipMalloc((void**)&d_V, sizeof(double) * N));
CUDA_CHECK(hipMalloc((void**)&d_U_out, sizeof(double) * N));
CUDA_CHECK(hipMalloc((void**)&d_V_out, sizeof(double) * N));
// copy data to device
CUDA_CHECK(hipMemcpyAsync(d_U, U_in.data(), sizeof(double) * N, hipMemcpyHostToDevice, stream));
CUDA_CHECK(hipMemcpyAsync(d_V, V_in.data(), sizeof(double) * N, hipMemcpyHostToDevice, stream));
// cast raw pointer to thrust device pointer
thrust::device_ptr<double> dt_U = thrust::device_pointer_cast(d_U);
thrust::device_ptr<double> dt_V = thrust::device_pointer_cast(d_V);
thrust::device_ptr<double> dt_U_out = thrust::device_pointer_cast(d_U_out);
thrust::device_ptr<double> dt_V_out = thrust::device_pointer_cast(d_V_out);
// create cusp::array1d
auto U_device = cusp::array1d_view<thrust::device_ptr<double>>(dt_U, dt_U + N);
auto V_device = cusp::array1d_view<thrust::device_ptr<double>>(dt_V, dt_V + N);
auto U_device_out = cusp::array1d_view<thrust::device_ptr<double>>(dt_U_out, dt_U_out + N);
auto V_device_out = cusp::array1d_view<thrust::device_ptr<double>>(dt_V_out, dt_V_out + N);
// perform the unstagger operation
cusp::multiply(thrust::hip::par.on(stream), U_unstagger, U_device, U_device_out);
cusp::multiply(thrust::hip::par.on(stream), V_unstagger, V_device, V_device_out);
// copy result back to host
CUDA_CHECK(hipMemcpyAsync(U_out.data(), d_U_out, sizeof(double) * N, hipMemcpyDeviceToHost, stream));
CUDA_CHECK(hipMemcpyAsync(V_out.data(), d_V_out, sizeof(double) * N, hipMemcpyDeviceToHost, stream));
// free memory
CUDA_CHECK(hipFree(d_U));
CUDA_CHECK(hipFree(d_V));
CUDA_CHECK(hipFree(d_U_out));
CUDA_CHECK(hipFree(d_V_out));
CUDA_CHECK(hipHostUnregister(U_in.data()));
CUDA_CHECK(hipHostUnregister(V_in.data()));
CUDA_CHECK(hipHostUnregister(U_out.data()));
CUDA_CHECK(hipHostUnregister(V_out.data()));
CUDA_CHECK(hipStreamDestroy(stream));
return std::make_pair(U_out, V_out);
}
| de2bcd2f63e077ed88fafd271deb228af38f6354.cu | /**
* @file unstagger.cpp
*
* Calculate the co-located velocity field for U and V
*
Description:
The grid in a staggered arrangement consists of central points ('o') where most parameter values
e.g. temperature and pressure are stored. The u-velocity points ('u') are shifted east by a half
grid spacing and v-velocity points ('v') shifted south by a half grid spacing. A example on a 3x3
grid is given below.
o---u---o---u---o---u
| | |
v v v
| | |
o---u---o---u---o---u
| | |
v v v
| | |
o---u---o---u---o---u
| | |
v v v
If collocation of all parameters is required, u- and v-velocity needs to be unstaggered. The
unstaggering is done by solving a set of linear equations. For the example grid shown above
the linear equations are.
Case u: Case v:
1*u(1,1) = o(1,1) 1*v(1,1) = o(1,1)
0.5*u(1,1) + 0.5*u(1,2) = o(1,2) 1*v(1,2) = o(1,2)
0.5*u(1,2) + 0.5*u(1,3) = o(1,3) 1*v(1,3) = o(1,3)
1*u(2,1) = o(2,1) 0.5*v(1,1) + 0.5*v(2,1) = o(2,1)
0.5*u(2,1) + 0.5*u(2,2) = o(2,2) 0.5*v(1,2) + 0.5*v(2,2) = o(2,2)
0.5*u(2,2) + 0.5*u(2,3) = o(2,3) 0.5*v(1,3) + 0.5*v(2,3) = o(2,3)
1*u(3,1) = o(3,1) 0.5*v(2,1) + 0.5*v(3,1) = o(3,1)
0.5*u(3,1) + 0.5*u(3,2) = o(3,2) 0.5*v(2,2) + 0.5*v(3,2) = o(3,2)
0.5*u(3,2) + 0.5*u(3,3) = o(3,3) 0.5*v(2,3) + 0.5*v(3,3) = o(3,3)
These equations can be re-written in matrix-vector form
|- -|
| u(1,1) |
| . |
| . |
| u(3,3) |
|- -|
|- -| |- -|
| w11 ... w19 | | o(1,1) |
| . . | | . |
| . . | | . |
| w91 ... w99 | | o(3,3) |
|- -| |- -|
where the weighting matrices for the u- and v-case are diagonal matrices.
The diagonal matrix to unstagger the u-velocity field has the form
| 2 0 0 0 0 0 0 0 0 |
| 1 1 0 0 0 0 0 0 0 |
| 0 1 1 0 0 0 0 0 0 |
| 0 0 0 2 0 0 0 0 0 |
1/2 * | 0 0 0 1 1 0 0 0 0 | = U_unstagger
| 0 0 0 0 1 1 0 0 0 |
| 0 0 0 0 0 0 2 0 0 |
| 0 0 0 0 0 0 1 1 0 |
| 0 0 0 0 0 0 0 1 1 |
and the diagonal matrix to unstagger the v-velocity
| 2 0 0 0 0 0 0 0 0 |
| 0 2 0 0 0 0 0 0 0 |
| 0 0 2 0 0 0 0 0 0 |
| 1 0 0 1 0 0 0 0 0 |
1/2 * | 0 1 0 0 1 0 0 0 0 | = V_unstagger
| 0 0 1 0 0 1 0 0 0 |
| 0 0 0 1 0 0 1 0 0 |
| 0 0 0 0 1 0 0 1 0 |
| 0 0 0 0 0 1 0 0 1 |
In this form the problem can be solved efficiently using a sparse matrix library
matrix-vector multiplication. In this implementation CUSP is used for that purpose.
**/
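/*
 * Worked example (hypothetical values): for one row of the 3x3 grid with
 * staggered u-velocities u = [2, 4, 6], applying U_unstagger gives
 * [1*2, 0.5*2 + 0.5*4, 0.5*4 + 0.5*6] = [2, 3, 5]; the first column is copied
 * as-is and every other point is the average of its two neighbouring u-points.
 */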
#include "cuda_plugin_helper.h"
#include "unstagger.cuh"
#ifdef DEBUG
#undef DEBUG
#include <cusp/array1d.h>
#include <cusp/array2d.h>
#include <cusp/dia_matrix.h>
#include <cusp/multiply.h>
#include <thrust/system/cuda/execution_policy.h>
#define DEBUG
#else
#include <cusp/array1d.h>
#include <cusp/array2d.h>
#include <cusp/dia_matrix.h>
#include <cusp/multiply.h>
#include <thrust/system/cuda/execution_policy.h>
#endif
cusp::dia_matrix<int, double, cusp::device_memory> U_unstagger;
cusp::dia_matrix<int, double, cusp::device_memory> V_unstagger;
void himan::plugin::unstagger_cuda::Init(size_t NX, size_t NY)
{
	// create a diagonal matrix with constant coefficients
size_t N = NX * NY;
cusp::dia_matrix<int, double, cusp::host_memory> h_U_unstagger(N, N, 2 * N, 2);
cusp::dia_matrix<int, double, cusp::host_memory> h_V_unstagger(N, N, 2 * N, 2);
cusp::array2d<double, cusp::device_memory> Diags(N, 2, 0.5);
h_U_unstagger.diagonal_offsets[0] = 0;
h_U_unstagger.diagonal_offsets[1] = -1;
h_U_unstagger.values = Diags;
// alter coefficient for interpolation of first column in U
for (size_t i = 0; i < NY; ++i)
{
h_U_unstagger.values(i * NX, 0) = 1.0;
h_U_unstagger.values(i * NX, 1) = 0.0;
}
h_V_unstagger.diagonal_offsets[0] = 0;
h_V_unstagger.diagonal_offsets[1] = -NX;
h_V_unstagger.values = Diags;
// alter coefficient for interpolation of first row in V
for (size_t i = 0; i < NX; ++i)
{
h_V_unstagger.values(i, 0) = 1.0;
h_V_unstagger.values(i, 1) = 0.0;
}
// copy matrices to device
U_unstagger = h_U_unstagger;
V_unstagger = h_V_unstagger;
}
std::pair<std::vector<double>, std::vector<double>> himan::plugin::unstagger_cuda::Process(std::vector<double>& U_in,
std::vector<double>& V_in)
{
size_t N = U_in.size();
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
std::vector<double> U_out(N);
std::vector<double> V_out(N);
CUDA_CHECK(cudaHostRegister(U_in.data(), sizeof(double) * N, 0));
CUDA_CHECK(cudaHostRegister(V_in.data(), sizeof(double) * N, 0));
CUDA_CHECK(cudaHostRegister(U_out.data(), sizeof(double) * N, 0));
CUDA_CHECK(cudaHostRegister(V_out.data(), sizeof(double) * N, 0));
// create 1d arrays on device
double* d_U = nullptr; // pointer to device memory pointing to incoming data of U
double* d_V = nullptr; // pointer to device memory pointing to incoming data of V
double* d_U_out = nullptr; // pointer to device memory to unstaggered data of U
double* d_V_out = nullptr; // pointer to device memory to unstaggered data of V
// allocate memory
CUDA_CHECK(cudaMalloc((void**)&d_U, sizeof(double) * N));
CUDA_CHECK(cudaMalloc((void**)&d_V, sizeof(double) * N));
CUDA_CHECK(cudaMalloc((void**)&d_U_out, sizeof(double) * N));
CUDA_CHECK(cudaMalloc((void**)&d_V_out, sizeof(double) * N));
// copy data to device
CUDA_CHECK(cudaMemcpyAsync(d_U, U_in.data(), sizeof(double) * N, cudaMemcpyHostToDevice, stream));
CUDA_CHECK(cudaMemcpyAsync(d_V, V_in.data(), sizeof(double) * N, cudaMemcpyHostToDevice, stream));
// cast raw pointer to thrust device pointer
thrust::device_ptr<double> dt_U = thrust::device_pointer_cast(d_U);
thrust::device_ptr<double> dt_V = thrust::device_pointer_cast(d_V);
thrust::device_ptr<double> dt_U_out = thrust::device_pointer_cast(d_U_out);
thrust::device_ptr<double> dt_V_out = thrust::device_pointer_cast(d_V_out);
// create cusp::array1d
auto U_device = cusp::array1d_view<thrust::device_ptr<double>>(dt_U, dt_U + N);
auto V_device = cusp::array1d_view<thrust::device_ptr<double>>(dt_V, dt_V + N);
auto U_device_out = cusp::array1d_view<thrust::device_ptr<double>>(dt_U_out, dt_U_out + N);
auto V_device_out = cusp::array1d_view<thrust::device_ptr<double>>(dt_V_out, dt_V_out + N);
// perform the unstagger operation
cusp::multiply(thrust::cuda::par.on(stream), U_unstagger, U_device, U_device_out);
cusp::multiply(thrust::cuda::par.on(stream), V_unstagger, V_device, V_device_out);
// copy result back to host
CUDA_CHECK(cudaMemcpyAsync(U_out.data(), d_U_out, sizeof(double) * N, cudaMemcpyDeviceToHost, stream));
CUDA_CHECK(cudaMemcpyAsync(V_out.data(), d_V_out, sizeof(double) * N, cudaMemcpyDeviceToHost, stream));
// free memory
CUDA_CHECK(cudaFree(d_U));
CUDA_CHECK(cudaFree(d_V));
CUDA_CHECK(cudaFree(d_U_out));
CUDA_CHECK(cudaFree(d_V_out));
CUDA_CHECK(cudaHostUnregister(U_in.data()));
CUDA_CHECK(cudaHostUnregister(V_in.data()));
CUDA_CHECK(cudaHostUnregister(U_out.data()));
CUDA_CHECK(cudaHostUnregister(V_out.data()));
CUDA_CHECK(cudaStreamDestroy(stream));
return std::make_pair(U_out, V_out);
}
|
41d0164c0f858025b4bef636a5195855ccb0aacd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
template <typename T>
__global__ void computeFlip(
const T* x,
const int M,
const int N,
T* y
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < M*N; i += blockDim.x * gridDim.x)
{
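// x is an M x N array in row-major order: ii is the column of element i, and
// x[i + N - 2*ii - 1] is column (N-1-ii) of the same row, so each length-N row is reversed.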
int ii = i%N;
y[i] = x[i+N-ii*2-1];
}
}
template <typename T>
void computeFlipCudaLauncher(
const T* x,
const int M,
const int N,
T* y
)
{
hipLaunchKernelGGL(( computeFlip), dim3(32), dim3(1024), 0, 0,
x,
M,
N,
y
);
}
template <typename T>
__global__ void computeFlipAndShift(
const T* x,
const int M,
const int N,
T* y
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < M*N; i += blockDim.x * gridDim.x)
{
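// same row-major layout as computeFlip: column 0 of each row is zeroed, and for ii > 0 the source
// index i + N - 2*ii is column (N-ii) of the same row, i.e. the flipped row shifted by one column.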
int ii = i%N;
y[i] = (ii)? x[i+N-ii*2] : 0;
}
}
template <typename T>
void computeFlipAndShiftCudaLauncher(
const T* x,
const int M,
const int N,
T* y
)
{
hipLaunchKernelGGL(( computeFlipAndShift), dim3(32), dim3(1024), 0, 0,
x,
M,
N,
y
);
}
template <typename T>
__global__ void negateOddEntries(
T* x,
const int M,
const int N
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < M*(N>>1); i += blockDim.x * gridDim.x)
{
x[i*2+1] = -x[i*2+1];
}
}
template <typename T>
void negateOddEntriesCudaLauncher(
T* x,
const int M,
const int N
)
{
hipLaunchKernelGGL(( negateOddEntries), dim3(32), dim3(1024), 0, 0,
x,
M,
N
);
}
#define REGISTER_FLIP_KERNEL_LAUNCHER(type) \
void instantiateComputeFlipLauncher(\
const type* x, \
const int M, \
const int N, \
type* y \
) \
{ \
return computeFlipCudaLauncher<type>( \
x, \
M, \
N, \
y \
); \
}
REGISTER_FLIP_KERNEL_LAUNCHER(float);
REGISTER_FLIP_KERNEL_LAUNCHER(double);
#define REGISTER_FLIPANDSHIFT_KERNEL_LAUNCHER(type) \
void instantiateComputeFlipAndShiftLauncher(\
const type* x, \
const int M, \
const int N, \
type* y \
) \
{ \
return computeFlipAndShiftCudaLauncher<type>( \
x, \
M, \
N, \
y \
); \
}
REGISTER_FLIPANDSHIFT_KERNEL_LAUNCHER(float);
REGISTER_FLIPANDSHIFT_KERNEL_LAUNCHER(double);
#define REGISTER_NEGATE_KERNEL_LAUNCHER(type) \
void instantiateNegateOddEntriesCudaLauncher(\
type* x, \
const int M, \
const int N \
) \
{ \
return negateOddEntriesCudaLauncher<type>( \
x, \
M, \
N \
); \
}
REGISTER_NEGATE_KERNEL_LAUNCHER(float);
REGISTER_NEGATE_KERNEL_LAUNCHER(double);
| 41d0164c0f858025b4bef636a5195855ccb0aacd.cu | #include <stdio.h>
#include <math.h>
#include <float.h>
#include "cuda_runtime.h"
template <typename T>
__global__ void computeFlip(
const T* x,
const int M,
const int N,
T* y
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < M*N; i += blockDim.x * gridDim.x)
{
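// x is an M x N array in row-major order: ii is the column of element i, and
// x[i + N - 2*ii - 1] is column (N-1-ii) of the same row, so each length-N row is reversed.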
int ii = i%N;
y[i] = x[i+N-ii*2-1];
}
}
template <typename T>
void computeFlipCudaLauncher(
const T* x,
const int M,
const int N,
T* y
)
{
computeFlip<<<32, 1024>>>(
x,
M,
N,
y
);
}
template <typename T>
__global__ void computeFlipAndShift(
const T* x,
const int M,
const int N,
T* y
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < M*N; i += blockDim.x * gridDim.x)
{
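// same row-major layout as computeFlip: column 0 of each row is zeroed, and for ii > 0 the source
// index i + N - 2*ii is column (N-ii) of the same row, i.e. the flipped row shifted by one column.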
int ii = i%N;
y[i] = (ii)? x[i+N-ii*2] : 0;
}
}
template <typename T>
void computeFlipAndShiftCudaLauncher(
const T* x,
const int M,
const int N,
T* y
)
{
computeFlipAndShift<<<32, 1024>>>(
x,
M,
N,
y
);
}
template <typename T>
__global__ void negateOddEntries(
T* x,
const int M,
const int N
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < M*(N>>1); i += blockDim.x * gridDim.x)
{
x[i*2+1] = -x[i*2+1];
}
}
template <typename T>
void negateOddEntriesCudaLauncher(
T* x,
const int M,
const int N
)
{
negateOddEntries<<<32, 1024>>>(
x,
M,
N
);
}
#define REGISTER_FLIP_KERNEL_LAUNCHER(type) \
void instantiateComputeFlipLauncher(\
const type* x, \
const int M, \
const int N, \
type* y \
) \
{ \
return computeFlipCudaLauncher<type>( \
x, \
M, \
N, \
y \
); \
}
REGISTER_FLIP_KERNEL_LAUNCHER(float);
REGISTER_FLIP_KERNEL_LAUNCHER(double);
#define REGISTER_FLIPANDSHIFT_KERNEL_LAUNCHER(type) \
void instantiateComputeFlipAndShiftLauncher(\
const type* x, \
const int M, \
const int N, \
type* y \
) \
{ \
return computeFlipAndShiftCudaLauncher<type>( \
x, \
M, \
N, \
y \
); \
}
REGISTER_FLIPANDSHIFT_KERNEL_LAUNCHER(float);
REGISTER_FLIPANDSHIFT_KERNEL_LAUNCHER(double);
#define REGISTER_NEGATE_KERNEL_LAUNCHER(type) \
void instantiateNegateOddEntriesCudaLauncher(\
type* x, \
const int M, \
const int N \
) \
{ \
return negateOddEntriesCudaLauncher<type>( \
x, \
M, \
N \
); \
}
REGISTER_NEGATE_KERNEL_LAUNCHER(float);
REGISTER_NEGATE_KERNEL_LAUNCHER(double);
|
c089c844d55b6be75459219936efc53907ac24fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <float.h>
namespace oneflow {
namespace {
// NOTE(Liang Depeng): refer to
// https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda
template<typename T>
__global__ void ReduceMaxMinPerLayer(const T* input_ptr, const int64_t elements, T* max_ptr,
T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
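// the minimum is tracked as the maximum of the negated inputs, so one max-style reduction (and
// cuda::atomic::Max) serves both results; consumers negate min_ptr to recover the true minimum.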
while (gid < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[gid]);
shared_min[tid] = max(shared_min[tid], -input_ptr[gid]);
gid += gridDim.x * blockDim.x;
}
__syncthreads();
gid = (blockDim.x * blockIdx.x) + tid;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s && gid < elements) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(max_ptr, shared_max[0]);
cuda::atomic::Max(min_ptr, shared_min[0]);
}
}
template<typename T>
__global__ void ReduceMaxMinPerChannel(const T* input_ptr, const int64_t elements,
const int64_t num_channels, const int64_t panel_size,
T* max_ptr, T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t cur_channel = blockIdx.x;
int64_t tid = threadIdx.x;
while (cur_channel < num_channels) {
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
int64_t index = (panel_size * cur_channel) + tid;
int64_t end = panel_size * (cur_channel + 1);
while (index < end && index < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[index]);
shared_min[tid] = max(shared_min[tid], -input_ptr[index]);
index += blockDim.x;
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(&max_ptr[cur_channel], shared_max[0]);
cuda::atomic::Max(&min_ptr[cur_channel], shared_min[0]);
}
// __syncthreads();
cur_channel += gridDim.x;
}
}
template<typename T>
__global__ void InitMaxMin(const int64_t elements, T* max_ptr, T* min_ptr) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
max_ptr[gid] = -FLT_MAX;
min_ptr[gid] = -FLT_MAX;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointSymmetric(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = weight_max / denominator;
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointAffine(const T* max_ptr, const T* min_ptr, const int64_t elements,
const double quantization_bit, T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1;
T min = -min_ptr[gid];
T s = (max_ptr[gid] - min) / denominator;
scale[gid] = s;
zero_point[gid] = -round(min / s);
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointCambricon(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
// T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = floor(log2(weight_max)) - (quantization_bit - 2);
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
} // namespace
#define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) \
hipLaunchKernelGGL(( func), dim3(SMBlocksNum4ThreadsNum(thread_num)), dim3(kCudaThreadsNumPerBlock), shared_mem_size, \
(device_ctx_ptr)->cuda_stream(), __VA_ARGS__)
template<typename T>
class GpuMinMaxObserverKernel final : public user_op::OpKernel {
public:
GpuMinMaxObserverKernel() = default;
~GpuMinMaxObserverKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
const bool per_layer_quantization = ctx->Attr<bool>("per_layer_quantization");
const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
const int64_t elements = in->shape().elem_cnt();
const int64_t channel = scale->shape().At(0);
const int64_t panel_size = elements / channel;
T* max_ptr = tmp_buffer->mut_dptr<T>();
T* min_ptr = max_ptr + channel;
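// tmp_buffer layout: [ channel running-max values | channel negated running-min values ]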
LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), channel, 0, channel, max_ptr, min_ptr);
if (per_layer_quantization) {
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements,
kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr,
min_ptr);
} else { // per-channel quantization
// NOTE(Liang Depeng): each block of threads will be responsible for
// computing the max and min values of the whole channel.
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerChannel<T>), ctx->device_ctx(),
channel * kCudaThreadsNumPerBlock, kCudaThreadsNumPerBlock * 2 * sizeof(T),
in->dptr<T>(), elements, channel, panel_size, max_ptr, min_ptr);
}
if (quantization_formula == "google") {
if (quantization_scheme == "symmetric") {
LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else { // quantization_scheme == "affine"
LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
}
} else if (quantization_formula == "cambricon") {
if (!per_layer_quantization) {
UNIMPLEMENTED() << " per-channel mode is not supported in cambricon scheme";
}
LAUNCH_CUDA_KERNEL((CalScaleZeroPointCambricon<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else {
UNIMPLEMENTED();
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MIN_MAX_OBSERVER_KERNEL(dtype) \
REGISTER_USER_KERNEL("min_max_observer") \
.SetCreateFn<GpuMinMaxObserverKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) -> size_t { \
size_t tmp_buffer_size = 1; \
if (ctx->Attr<bool>("per_layer_quantization") == false) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
tmp_buffer_size = in_shape->At(0); \
} \
return 2 * tmp_buffer_size * sizeof(dtype); \
})
REGISTER_MIN_MAX_OBSERVER_KERNEL(float);
REGISTER_MIN_MAX_OBSERVER_KERNEL(double);
} // namespace oneflow
| c089c844d55b6be75459219936efc53907ac24fa.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <float.h>
namespace oneflow {
namespace {
// NOTE(Liang Depeng): refer to
// https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda
template<typename T>
__global__ void ReduceMaxMinPerLayer(const T* input_ptr, const int64_t elements, T* max_ptr,
T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
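// the minimum is tracked as the maximum of the negated inputs, so one max-style reduction (and
// cuda::atomic::Max) serves both results; consumers negate min_ptr to recover the true minimum.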
while (gid < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[gid]);
shared_min[tid] = max(shared_min[tid], -input_ptr[gid]);
gid += gridDim.x * blockDim.x;
}
__syncthreads();
gid = (blockDim.x * blockIdx.x) + tid;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s && gid < elements) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(max_ptr, shared_max[0]);
cuda::atomic::Max(min_ptr, shared_min[0]);
}
}
template<typename T>
__global__ void ReduceMaxMinPerChannel(const T* input_ptr, const int64_t elements,
const int64_t num_channels, const int64_t panel_size,
T* max_ptr, T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t cur_channel = blockIdx.x;
int64_t tid = threadIdx.x;
while (cur_channel < num_channels) {
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
int64_t index = (panel_size * cur_channel) + tid;
int64_t end = panel_size * (cur_channel + 1);
while (index < end && index < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[index]);
shared_min[tid] = max(shared_min[tid], -input_ptr[index]);
index += blockDim.x;
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(&max_ptr[cur_channel], shared_max[0]);
cuda::atomic::Max(&min_ptr[cur_channel], shared_min[0]);
}
// __syncthreads();
cur_channel += gridDim.x;
}
}
template<typename T>
__global__ void InitMaxMin(const int64_t elements, T* max_ptr, T* min_ptr) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
max_ptr[gid] = -FLT_MAX;
min_ptr[gid] = -FLT_MAX;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointSymmetric(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = weight_max / denominator;
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointAffine(const T* max_ptr, const T* min_ptr, const int64_t elements,
const double quantization_bit, T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1;
T min = -min_ptr[gid];
T s = (max_ptr[gid] - min) / denominator;
scale[gid] = s;
zero_point[gid] = -round(min / s);
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointCambricon(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
// T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = floor(log2(weight_max)) - (quantization_bit - 2);
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
} // namespace
#define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) \
func<<<SMBlocksNum4ThreadsNum(thread_num), kCudaThreadsNumPerBlock, shared_mem_size, \
(device_ctx_ptr)->cuda_stream()>>>(__VA_ARGS__)
template<typename T>
class GpuMinMaxObserverKernel final : public user_op::OpKernel {
public:
GpuMinMaxObserverKernel() = default;
~GpuMinMaxObserverKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
const bool per_layer_quantization = ctx->Attr<bool>("per_layer_quantization");
const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
const int64_t elements = in->shape().elem_cnt();
const int64_t channel = scale->shape().At(0);
const int64_t panel_size = elements / channel;
T* max_ptr = tmp_buffer->mut_dptr<T>();
T* min_ptr = max_ptr + channel;
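// tmp_buffer layout: [ channel running-max values | channel negated running-min values ]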
LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), channel, 0, channel, max_ptr, min_ptr);
if (per_layer_quantization) {
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements,
kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr,
min_ptr);
} else { // per-channel quantization
// NOTE(Liang Depeng): each block of threads will be responsible for
// computing the max and min values of the whole channel.
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerChannel<T>), ctx->device_ctx(),
channel * kCudaThreadsNumPerBlock, kCudaThreadsNumPerBlock * 2 * sizeof(T),
in->dptr<T>(), elements, channel, panel_size, max_ptr, min_ptr);
}
if (quantization_formula == "google") {
if (quantization_scheme == "symmetric") {
LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else { // quantization_scheme == "affine"
LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
}
} else if (quantization_formula == "cambricon") {
if (!per_layer_quantization) {
UNIMPLEMENTED() << " per-channel mode is not supported in cambricon scheme";
}
LAUNCH_CUDA_KERNEL((CalScaleZeroPointCambricon<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else {
UNIMPLEMENTED();
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MIN_MAX_OBSERVER_KERNEL(dtype) \
REGISTER_USER_KERNEL("min_max_observer") \
.SetCreateFn<GpuMinMaxObserverKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) -> size_t { \
size_t tmp_buffer_size = 1; \
if (ctx->Attr<bool>("per_layer_quantization") == false) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
tmp_buffer_size = in_shape->At(0); \
} \
return 2 * tmp_buffer_size * sizeof(dtype); \
})
REGISTER_MIN_MAX_OBSERVER_KERNEL(float);
REGISTER_MIN_MAX_OBSERVER_KERNEL(double);
} // namespace oneflow
|
3e7a96468e8ed5b991e8707998d7d7a645991204.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This file contains C wrappers around the some of the CUDA API and the
// kernel functions so that they can be called from "particleSystem.cpp"
#if defined(__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "thrust/device_ptr.h"
#include "thrust/for_each.h"
#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
#include "particles_kernel_impl.cuh"
extern "C"
{
void cudaInit(int argc, char **argv)
{
int devID;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("No CUDA Capable devices found, exiting...\n");
exit(EXIT_SUCCESS);
}
}
void allocateArray(void **devPtr, size_t size)
{
checkCudaErrors(hipMalloc(devPtr, size));
}
void freeArray(void *devPtr)
{
checkCudaErrors(hipFree(devPtr));
}
void threadSync()
{
checkCudaErrors(hipDeviceSynchronize());
}
void copyArrayToDevice(void *device, const void *host, int offset, int size)
{
checkCudaErrors(hipMemcpy((char *) device + offset, host, size, hipMemcpyHostToDevice));
}
void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
hipGraphicsMapFlagsNone));
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource));
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
checkCudaErrors(hipGraphicsMapResources(1, cuda_vbo_resource, 0));
size_t num_bytes;
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes,
*cuda_vbo_resource));
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
void copyArrayFromDevice(void *host, const void *device,
struct cudaGraphicsResource **cuda_vbo_resource, int size)
{
if (cuda_vbo_resource)
{
device = mapGLBufferObject(cuda_vbo_resource);
}
checkCudaErrors(hipMemcpy(host, device, size, hipMemcpyDeviceToHost));
if (cuda_vbo_resource)
{
unmapGLBufferObject(*cuda_vbo_resource);
}
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
checkCudaErrors(hipMemcpyToSymbol(params, hostParams, sizeof(SimParams)));
}
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
void integrateSystem(float *pos,
float *vel,
float deltaTime,
uint numParticles)
{
thrust::device_ptr<float2> d_pos2((float2 *)pos);
thrust::device_ptr<float2> d_vel2((float2 *)vel);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_pos2, d_vel2)),
thrust::make_zip_iterator(thrust::make_tuple(d_pos2+numParticles, d_vel2+numParticles)),
integrate_functor(deltaTime));
}
void calcHash(uint *gridParticleHash,
uint *gridParticleIndex,
float *pos,
int numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( calcHashD), dim3(numBlocks), dim3(numThreads) , 0, 0, gridParticleHash,
gridParticleIndex,
(float2 *) pos,
numParticles);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
void reorderDataAndFindCellStart(uint *cellStart,
uint *cellEnd,
float *sortedPos,
float *sortedVel,
uint *gridParticleHash,
uint *gridParticleIndex,
float *oldPos,
float *oldVel,
uint numParticles,
uint numCells)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// set all cells to empty
checkCudaErrors(hipMemset(cellStart, 0xffffffff, numCells*sizeof(uint)));
uint smemSize = sizeof(uint)*(numThreads+1);
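// numThreads+1 hash slots in shared memory: one per thread plus the hash of the particle just
// before this block, so each thread can compare against its predecessor to detect cell boundaries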
hipLaunchKernelGGL(( reorderDataAndFindCellStartD), dim3(numBlocks), dim3(numThreads), smemSize, 0,
cellStart,
cellEnd,
(float2 *) sortedPos,
(float2 *) sortedVel,
gridParticleHash,
gridParticleIndex,
(float2 *) oldPos,
(float2 *) oldVel,
numParticles);
getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD");
}
void run(float *newVel,
float *sortedPos,
float *sortedVel,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
uint numParticles,
uint numCells)
{
// thread per particle
uint numThreads, numBlocks;
computeGridSize(numParticles, 64, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( run), dim3(numBlocks), dim3(numThreads) , 0, 0, (float2 *)newVel,
(float2 *)sortedPos,
(float2 *)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash),
thrust::device_ptr<uint>(dGridParticleHash + numParticles),
thrust::device_ptr<uint>(dGridParticleIndex));
}
} // extern "C"
| 3e7a96468e8ed5b991e8707998d7d7a645991204.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This file contains C wrappers around the some of the CUDA API and the
// kernel functions so that they can be called from "particleSystem.cpp"
#if defined(__APPLE__) || defined(MACOSX)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "thrust/device_ptr.h"
#include "thrust/for_each.h"
#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
#include "particles_kernel_impl.cuh"
extern "C"
{
void cudaInit(int argc, char **argv)
{
int devID;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("No CUDA Capable devices found, exiting...\n");
exit(EXIT_SUCCESS);
}
}
void allocateArray(void **devPtr, size_t size)
{
checkCudaErrors(cudaMalloc(devPtr, size));
}
void freeArray(void *devPtr)
{
checkCudaErrors(cudaFree(devPtr));
}
void threadSync()
{
checkCudaErrors(cudaDeviceSynchronize());
}
void copyArrayToDevice(void *device, const void *host, int offset, int size)
{
checkCudaErrors(cudaMemcpy((char *) device + offset, host, size, cudaMemcpyHostToDevice));
}
void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
checkCudaErrors(cudaGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
cudaGraphicsMapFlagsNone));
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource));
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
checkCudaErrors(cudaGraphicsMapResources(1, cuda_vbo_resource, 0));
size_t num_bytes;
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes,
*cuda_vbo_resource));
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
void copyArrayFromDevice(void *host, const void *device,
struct cudaGraphicsResource **cuda_vbo_resource, int size)
{
if (cuda_vbo_resource)
{
device = mapGLBufferObject(cuda_vbo_resource);
}
checkCudaErrors(cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost));
if (cuda_vbo_resource)
{
unmapGLBufferObject(*cuda_vbo_resource);
}
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
checkCudaErrors(cudaMemcpyToSymbol(params, hostParams, sizeof(SimParams)));
}
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
void integrateSystem(float *pos,
float *vel,
float deltaTime,
uint numParticles)
{
thrust::device_ptr<float2> d_pos2((float2 *)pos);
thrust::device_ptr<float2> d_vel2((float2 *)vel);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_pos2, d_vel2)),
thrust::make_zip_iterator(thrust::make_tuple(d_pos2+numParticles, d_vel2+numParticles)),
integrate_functor(deltaTime));
}
void calcHash(uint *gridParticleHash,
uint *gridParticleIndex,
float *pos,
int numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
calcHashD<<< numBlocks, numThreads >>>(gridParticleHash,
gridParticleIndex,
(float2 *) pos,
numParticles);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
void reorderDataAndFindCellStart(uint *cellStart,
uint *cellEnd,
float *sortedPos,
float *sortedVel,
uint *gridParticleHash,
uint *gridParticleIndex,
float *oldPos,
float *oldVel,
uint numParticles,
uint numCells)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// set all cells to empty
checkCudaErrors(cudaMemset(cellStart, 0xffffffff, numCells*sizeof(uint)));
uint smemSize = sizeof(uint)*(numThreads+1);
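// numThreads+1 hash slots in shared memory: one per thread plus the hash of the particle just
// before this block, so each thread can compare against its predecessor to detect cell boundaries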
reorderDataAndFindCellStartD<<< numBlocks, numThreads, smemSize>>>(
cellStart,
cellEnd,
(float2 *) sortedPos,
(float2 *) sortedVel,
gridParticleHash,
gridParticleIndex,
(float2 *) oldPos,
(float2 *) oldVel,
numParticles);
getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD");
}
void run(float *newVel,
float *sortedPos,
float *sortedVel,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
uint numParticles,
uint numCells)
{
// thread per particle
uint numThreads, numBlocks;
computeGridSize(numParticles, 64, numBlocks, numThreads);
// execute the kernel
run<<< numBlocks, numThreads >>>((float2 *)newVel,
(float2 *)sortedPos,
(float2 *)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash),
thrust::device_ptr<uint>(dGridParticleHash + numParticles),
thrust::device_ptr<uint>(dGridParticleIndex));
}
} // extern "C"
|
9137a86bc0f852a68a88ce0845000bd09b0e1d3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* CUDA Machine Final Project
*
* Dong-Bang Tasi
* May 27, 2012
* Stanford University
*
*/
#include <iostream>
#include <fstream>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/generate.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/inner_product.h>
#include <assert.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include "omp.h"
template<typename floatType>
__global__
void SegmentedScan(floatType *curr, floatType *prev, floatType *xx, int* s, int p, int threads)
{
// __shared__ floatType smem[y_side];
int thread_id = threadIdx.x + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x; // global thread index
int warp_id = thread_id / threads + 1;// global warp index, from 1 to p-1
int lane = thread_id & (threads - 1); // thread index within the warp
if(warp_id < p && lane ==0)
{
int start = s[warp_id-1];
int end = s[warp_id];
//curr[start] = prev[start];
//for(int i=0; i<((end-start) + threads-1)/threads; ++i)
//{
//for(int j=0; j<threads;++j)
//{
//}
//}
curr[start] = prev[start];
for(int j=start+1; j<end; ++j)
{
curr[j] = curr[j-1] + prev[j];
}
}
}
template<typename floatType>
struct id_op : public thrust::unary_function<floatType,floatType>
{
__host__ __device__
floatType operator()(floatType x) const
{
return x;
}
};
int cpu_check = 0;
int main(int argc, char **argv) {
if(argc < 3) {
printf("Run command: ./fp \"file a.txt\" \"file x.txt\"\n");
exit(0);
}
if( argc == 4){ cpu_check = 1;}
std::ifstream ifs_a(argv[1]);
if (!ifs_a.good()) {
std::cerr << "Couldn't open " << argv[1] << std::endl;
return 1;
}
typedef float CPUFloatType;
typedef float GPUFloatType;
int n, p, q, iters;
ifs_a >> n >> p >> q >> iters;
thrust::host_vector<CPUFloatType> a(n);
thrust::host_vector<int> s(p);
thrust::host_vector<int> k(n);
for(int i=0; i<n; ++i){ ifs_a >> a[i];}
for(int i=0; i<p; ++i){ ifs_a >> s[i];}
for(int i=0; i<n; ++i){ ifs_a >> k[i];}
ifs_a.close();
std::ifstream ifs_b(argv[2]);
if (!ifs_b.good()) {
std::cerr << "Couldn't open " << argv[2] << std::endl;
return 1;
}
thrust::host_vector<CPUFloatType> x(q);
for(int i=0; i<q; ++i){ ifs_b >> x[i];}
ifs_b.close();
std::cout<<"\nDim of a: "<<n<<"\nDim of x: "<<q<<"\nDim of s: "<< p<<"\n# of iters: "<<iters<<"\n\n";
// Scan the s array, and determine the structure
thrust::host_vector<int> key(n);
for(int i=0,s_pos=0; i<n; ++i)
{
if(!( s[s_pos]<i+1 && i<s[s_pos+1])){ s_pos++;}
key[i] = s_pos;
}
thrust::host_vector<CPUFloatType> cpu_buffer;
CPUFloatType* cpu_curr;
CPUFloatType* cpu_prev;
// Since x will be used several times, let's flat it to increase the memory access coalesce.
thrust::host_vector<CPUFloatType> xx(n);
for(int i=0; i<n; ++i){ xx[i] = x[k[i]];}
double cpu_start_time = 0;
double cpu_end_time = 0;
if( cpu_check != 0)
{
cpu_buffer.resize(2*n);
thrust::copy(a.begin(), a.end(), cpu_buffer.begin());
cpu_curr = &cpu_buffer[0];
cpu_prev = &cpu_buffer[n];
cpu_start_time = omp_get_wtime();
for(int iter=0; iter<iters;++iter)
{
std::swap(cpu_curr, cpu_prev);
#pragma omp parallel for
for(int i=0; i<n; ++i){ cpu_prev[i] *= xx[i];}
// Perform a segmented scan in CPU
#pragma omp parallel for
for(int i=1; i<p; ++i)
{
cpu_curr[s[i-1]] = cpu_prev[s[i-1]];
for(int j=s[i-1]+1; j<s[i]; ++j)
{
cpu_curr[j] = cpu_curr[j-1] + cpu_prev[j];
}
}
}
cpu_end_time = omp_get_wtime();
}
thrust::device_vector<GPUFloatType> gpu_buffer(2*n);
thrust::device_vector<GPUFloatType> xx_gpu = xx;
thrust::device_vector<int> s_gpu = s;
thrust::device_vector<int> key_gpu = key;
thrust::device_ptr<GPUFloatType> gpu_curr;
thrust::device_ptr<GPUFloatType> gpu_prev;
gpu_curr = &gpu_buffer[0];
gpu_prev = &gpu_buffer[n];
thrust::copy(a.begin(), a.end(), gpu_buffer.begin());
int threads_in_segment = 32;
dim3 threads(512, 1, 1);
int grid_size = (p*threads_in_segment + threads.x - 1)/threads.x;
//std::cout<<"grid:"<<grid_size<<"\n";
dim3 blocks(128, (grid_size + 127)/128);
hipEvent_t start;
hipEvent_t end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
// Since x will be used several times, let's flat it to increase the memory access coalesce.
//{
//thrust::device_vector<GPUFloatType> x_gpu = x;
//thrust::device_vector<int> k_gpu = k;
//thrust::transform( thrust::make_permutation_iterator(x_gpu.begin(),k_gpu.begin()),
//thrust::make_permutation_iterator(x_gpu.begin(), k_gpu.end()),
//xx_gpu.begin(), id_op<GPUFloatType>());
//}
// Start the GPU implementation.
for(int iter=0; iter<iters;++iter)
{
//std::cout<<"GPU_iter: "<<iter<<"\n";
// thrust::swap(gpu_curr, gpu_prev);
thrust::transform(gpu_curr, gpu_curr+n, xx_gpu.begin(), gpu_curr, thrust::multiplies<GPUFloatType>());
hipLaunchKernelGGL(( SegmentedScan<GPUFloatType>), dim3(blocks), dim3(threads), 0, 0, thrust::raw_pointer_cast(gpu_curr),
thrust::raw_pointer_cast(gpu_curr),
thrust::raw_pointer_cast(&xx_gpu[0]),
thrust::raw_pointer_cast(&s_gpu[0]), p, threads_in_segment);
}
hipEventRecord(end, 0);
hipEventSynchronize(end);
float elapsed_time;
hipEventElapsedTime(&elapsed_time, start, end);
thrust::host_vector<GPUFloatType> gpu_result_on_host(n);
thrust::copy(gpu_curr, gpu_curr+n, gpu_result_on_host.begin());
if (cpu_check != 0)
{ double tol = 10e-3;
std::cout<<"The CPU running time of my code for "<<iters<<" iterations is: "<<(cpu_end_time-cpu_start_time)*1000<< " milliseconds.\n\n";
std::cout<<"Checking the correctness by the result from CPU\n\n";
std::ofstream ofs_cpu("b_cpu.txt");
for (int i = 0; i < n; ++i) {
if( std::abs(cpu_curr[i] - gpu_result_on_host[i]) > tol)
{
// std::cout<<"i: "<<i<<", "<<std::abs(cpu_curr[i] - gpu_result_on_host[i]) <<"\n";
// assert( std::abs(cpu_curr[i] - gpu_result_on_host[i]) < tol) ;
}
ofs_cpu << cpu_curr[i] << " ";
}
ofs_cpu.close();
}
std::cout<<"The running time of my code for "<<iters<<" iterations is: "<<elapsed_time<< " milliseconds.\n\n";
std::ofstream ofs_gpu("b.txt");
for (int i = 0; i < n; ++i) {
ofs_gpu << gpu_result_on_host[i] << " ";
}
ofs_gpu.close();
return 0;
}
| 9137a86bc0f852a68a88ce0845000bd09b0e1d3a.cu | /* CUDA Machine Final Project
*
* Dong-Bang Tasi
* May 27, 2012
* Stanford University
*
*/
#include <iostream>
#include <fstream>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/generate.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_int_distribution.h>
#include <thrust/inner_product.h>
#include <assert.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include "omp.h"
template<typename floatType>
__global__
void SegmentedScan(floatType *curr, floatType *prev, floatType *xx, int* s, int p, int threads)
{
// __shared__ floatType smem[y_side];
int thread_id = threadIdx.x + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x; // global thread index
int warp_id = thread_id / threads + 1;// global warp index, from 1 to p-1
int lane = thread_id & (threads - 1); // thread index within the warp
if(warp_id < p && lane ==0)
{
int start = s[warp_id-1];
int end = s[warp_id];
//curr[start] = prev[start];
//for(int i=0; i<((end-start) + threads-1)/threads; ++i)
//{
//for(int j=0; j<threads;++j)
//{
//}
//}
curr[start] = prev[start];
for(int j=start+1; j<end; ++j)
{
curr[j] = curr[j-1] + prev[j];
}
}
}
template<typename floatType>
struct id_op : public thrust::unary_function<floatType,floatType>
{
__host__ __device__
floatType operator()(floatType x) const
{
return x;
}
};
int cpu_check = 0;
int main(int argc, char **argv) {
if(argc < 3) {
printf("Run command: ./fp \"file a.txt\" \"file x.txt\"\n");
exit(0);
}
if( argc == 4){ cpu_check = 1;}
std::ifstream ifs_a(argv[1]);
if (!ifs_a.good()) {
std::cerr << "Couldn't open " << argv[1] << std::endl;
return 1;
}
typedef float CPUFloatType;
typedef float GPUFloatType;
int n, p, q, iters;
ifs_a >> n >> p >> q >> iters;
thrust::host_vector<CPUFloatType> a(n);
thrust::host_vector<int> s(p);
thrust::host_vector<int> k(n);
for(int i=0; i<n; ++i){ ifs_a >> a[i];}
for(int i=0; i<p; ++i){ ifs_a >> s[i];}
for(int i=0; i<n; ++i){ ifs_a >> k[i];}
ifs_a.close();
std::ifstream ifs_b(argv[2]);
if (!ifs_b.good()) {
std::cerr << "Couldn't open " << argv[2] << std::endl;
return 1;
}
thrust::host_vector<CPUFloatType> x(q);
for(int i=0; i<q; ++i){ ifs_b >> x[i];}
ifs_b.close();
std::cout<<"\nDim of a: "<<n<<"\nDim of x: "<<q<<"\nDim of s: "<< p<<"\n# of iters: "<<iters<<"\n\n";
// Scan the s array, and determine the structure
thrust::host_vector<int> key(n);
for(int i=0,s_pos=0; i<n; ++i)
{
if(!( s[s_pos]<i+1 && i<s[s_pos+1])){ s_pos++;}
key[i] = s_pos;
}
thrust::host_vector<CPUFloatType> cpu_buffer;
CPUFloatType* cpu_curr;
CPUFloatType* cpu_prev;
// Since x will be used several times, let's flat it to increase the memory access coalesce.
thrust::host_vector<CPUFloatType> xx(n);
for(int i=0; i<n; ++i){ xx[i] = x[k[i]];}
double cpu_start_time = 0;
double cpu_end_time = 0;
if( cpu_check != 0)
{
cpu_buffer.resize(2*n);
thrust::copy(a.begin(), a.end(), cpu_buffer.begin());
cpu_curr = &cpu_buffer[0];
cpu_prev = &cpu_buffer[n];
cpu_start_time = omp_get_wtime();
for(int iter=0; iter<iters;++iter)
{
std::swap(cpu_curr, cpu_prev);
#pragma omp parallel for
for(int i=0; i<n; ++i){ cpu_prev[i] *= xx[i];}
// Perform a segmented scan in CPU
#pragma omp parallel for
for(int i=1; i<p; ++i)
{
cpu_curr[s[i-1]] = cpu_prev[s[i-1]];
for(int j=s[i-1]+1; j<s[i]; ++j)
{
cpu_curr[j] = cpu_curr[j-1] + cpu_prev[j];
}
}
}
cpu_end_time = omp_get_wtime();
}
thrust::device_vector<GPUFloatType> gpu_buffer(2*n);
thrust::device_vector<GPUFloatType> xx_gpu = xx;
thrust::device_vector<int> s_gpu = s;
thrust::device_vector<int> key_gpu = key;
thrust::device_ptr<GPUFloatType> gpu_curr;
thrust::device_ptr<GPUFloatType> gpu_prev;
gpu_curr = &gpu_buffer[0];
gpu_prev = &gpu_buffer[n];
thrust::copy(a.begin(), a.end(), gpu_buffer.begin());
int threads_in_segment = 32;
dim3 threads(512, 1, 1);
int grid_size = (p*threads_in_segment + threads.x - 1)/threads.x;
//std::cout<<"grid:"<<grid_size<<"\n";
dim3 blocks(128, (grid_size + 127)/128);
cudaEvent_t start;
cudaEvent_t end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
// Since x will be used several times, let's flat it to increase the memory access coalesce.
//{
//thrust::device_vector<GPUFloatType> x_gpu = x;
//thrust::device_vector<int> k_gpu = k;
//thrust::transform( thrust::make_permutation_iterator(x_gpu.begin(),k_gpu.begin()),
//thrust::make_permutation_iterator(x_gpu.begin(), k_gpu.end()),
//xx_gpu.begin(), id_op<GPUFloatType>());
//}
// Start the GPU implementation.
for(int iter=0; iter<iters;++iter)
{
//std::cout<<"GPU_iter: "<<iter<<"\n";
// thrust::swap(gpu_curr, gpu_prev);
thrust::transform(gpu_curr, gpu_curr+n, xx_gpu.begin(), gpu_curr, thrust::multiplies<GPUFloatType>());
SegmentedScan<GPUFloatType><<<blocks, threads>>>(thrust::raw_pointer_cast(gpu_curr),
thrust::raw_pointer_cast(gpu_curr),
thrust::raw_pointer_cast(&xx_gpu[0]),
thrust::raw_pointer_cast(&s_gpu[0]), p, threads_in_segment);
}
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float elapsed_time;
cudaEventElapsedTime(&elapsed_time, start, end);
thrust::host_vector<GPUFloatType> gpu_result_on_host(n);
thrust::copy(gpu_curr, gpu_curr+n, gpu_result_on_host.begin());
if (cpu_check != 0)
{ double tol = 10e-3;
std::cout<<"The CPU running time of my code for "<<iters<<" iterations is: "<<(cpu_end_time-cpu_start_time)*1000<< " milliseconds.\n\n";
std::cout<<"Checking the correctness by the result from CPU\n\n";
std::ofstream ofs_cpu("b_cpu.txt");
for (int i = 0; i < n; ++i) {
if( std::abs(cpu_curr[i] - gpu_result_on_host[i]) > tol)
{
// std::cout<<"i: "<<i<<", "<<std::abs(cpu_curr[i] - gpu_result_on_host[i]) <<"\n";
// assert( std::abs(cpu_curr[i] - gpu_result_on_host[i]) < tol) ;
}
ofs_cpu << cpu_curr[i] << " ";
}
ofs_cpu.close();
}
std::cout<<"The running time of my code for "<<iters<<" iterations is: "<<elapsed_time<< " milliseconds.\n\n";
std::ofstream ofs_gpu("b.txt");
for (int i = 0; i < n; ++i) {
ofs_gpu << gpu_result_on_host[i] << " ";
}
ofs_gpu.close();
return 0;
}
|
6b8fdcafbe28ed19b3f637becdc02a94bc4ac7bc.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define SIZE 10
__global__ void max(int *a , int *c) // kernel function definition
{
int i = threadIdx.x; // initialize i to thread ID
*c = a[0];
//printf("a[i] is %d \n",a[i]);
atomicMin(c,a[i]);
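// atomicMin leaves the smallest element of a in *c (despite the "max" naming of the kernel and the printout)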
//printf("max is %d \n",*c);
}
int main()
{
int i;
srand(time(NULL)); //makes use of the computer's internal clock to control the choice of the seed
int a[10]={2,41,21,74,86,45,92,35,49,50};
int c;
int *dev_a, *dev_c; //GPU / device parameters
hipMalloc((void **) &dev_a, SIZE*sizeof(int)); //assign memory to parameters on GPU
hipMalloc((void **) &dev_c, SIZE*sizeof(int));
for( i = 0 ; i < SIZE ; i++)
{
a[i] = i; // rand()% 1000 + 1; // input the numbers
//printf("%d ",a[i]);
}
hipMemcpy(dev_c, &c, sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dev_a, a, SIZE*sizeof(int),hipMemcpyHostToDevice); //copy the array from CPU to GPU
hipLaunchKernelGGL(( max), dim3(1),dim3(SIZE), 0, 0, dev_a,dev_c); // call kernel function <<<number of blocks, number of threads
hipMemcpy(&c, dev_c, sizeof(int),hipMemcpyDeviceToHost); // copy the result back from GPU to CPU
printf("\nmax = %d ",c);
hipFree(dev_a); // Free the allocated memory
hipFree(dev_c);
printf("");
return 0;
}
| 6b8fdcafbe28ed19b3f637becdc02a94bc4ac7bc.cu | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#define SIZE 10
__global__ void max(int *a , int *c) // kernel function definition
{
int i = threadIdx.x; // initialize i to thread ID
*c = a[0];
//printf("a[i] is %d \n",a[i]);
atomicMin(c,a[i]);
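// atomicMin leaves the smallest element of a in *c (despite the "max" naming of the kernel and the printout)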
//printf("max is %d \n",*c);
}
int main()
{
int i;
srand(time(NULL)); //makes use of the computer's internal clock to control the choice of the seed
int a[10]={2,41,21,74,86,45,92,35,49,50};
int c;
int *dev_a, *dev_c; //GPU / device parameters
cudaMalloc((void **) &dev_a, SIZE*sizeof(int)); //assign memory to parameters on GPU
cudaMalloc((void **) &dev_c, SIZE*sizeof(int));
for( i = 0 ; i < SIZE ; i++)
{
a[i] = i; // rand()% 1000 + 1; // input the numbers
//printf("%d ",a[i]);
}
cudaMemcpy(dev_c, &c, sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_a, a, SIZE*sizeof(int),cudaMemcpyHostToDevice); //copy the array from CPU to GPU
max<<<1,SIZE>>>(dev_a,dev_c); // call kernel function <<<number of blocks, number of threads
cudaMemcpy(&c, dev_c, sizeof(int),cudaMemcpyDeviceToHost); // copy the result back from GPU to CPU
printf("\nmax = %d ",c);
cudaFree(dev_a); // Free the allocated memory
cudaFree(dev_c);
printf("");
return 0;
}
|
186692c47e4ef549f8723696254f09ee1b81e036.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <vector>
#include <cmath>
#include "sdf.cuh"
#include <eigen3/Eigen/Dense>
#define INDEX3(data, x, y, z, dims) (data[(z)*dims(1)*dims(0) + (y)*dims(0) + (x)])
#define IS_IN_RANGE3(x, y, z) ((x) >= 0 && (x) < dims(0) && (y) >= 0 && (y) < dims(1) && (z) >= 0 && (z) < dims(2))
#define OFFSET3(dx, dy, dz) (IS_IN_RANGE3((x + dx), (y + dy), (z + dz)) ? \
data[(z + dz)*dims(1)*dims(0) + (y + dy)*dims(0) + (x + dx)] : 1e3)
void sdf(std::vector<uint8_t>& walls, std::vector<float>& out, Eigen::Array3i dims, int trunc) {
//// -> memory
//int size_verts = verts.size();
//float *verts_;
//hipMalloc(&verts_, size_verts*sizeof(float));
//hipMemcpy(verts_, verts.data(), size_verts*sizeof(int32_t), hipMemcpyHostToDevice);
//// <-
size_t n_elems = dims(0)*dims(1)*dims(2);
assert(walls.size() == n_elems);
// -> thread blocks
Eigen::Array3i dims_padded = dims.unaryExpr([](const int x) {
int pad = 8 - x%8;
int f = x%8 > 0 ? 1 : 0;
return x + f*pad;
});
assert(dims_padded(0)%8 == 0);
assert(dims_padded(1)%8 == 0);
assert(dims_padded(2)%8 == 0);
//size_t n_triangles = faces.size();
dim3 blocks(dims_padded(0)/8, dims_padded(1)/8, dims_padded(2)/8);
dim3 threads(8, 8, 8);
// <-
// -> init
uint8_t* water;
hipMalloc(&water, n_elems*sizeof(uint8_t));
hipMemcpy(water, walls.data(), n_elems*sizeof(uint8_t), hipMemcpyHostToDevice);
float *sdf;
hipMalloc(&sdf, n_elems*sizeof(float));
float *tmp;
hipMalloc(&tmp, n_elems*sizeof(float));
// <-
// -> init
hipLaunchKernelGGL(( init_kernel), dim3(blocks), dim3(threads), 0, 0, water, sdf, dims, trunc);
hipDeviceSynchronize();
// <-
// -> thicken
int n_iteration = dims.maxCoeff();
for (int i = 0; i < n_iteration; i++) {
hipLaunchKernelGGL(( thicken_kernel), dim3(blocks), dim3(threads), 0, 0, sdf, tmp, dims, trunc);
hipDeviceSynchronize();
hipMemcpy(sdf, tmp, n_elems*sizeof(float), hipMemcpyDeviceToDevice);
}
// <-
// copy data
hipMemcpy(out.data(), sdf, n_elems*sizeof(float), hipMemcpyDeviceToHost);
hipFree(water);
// <-
};
__global__ void init_kernel(uint8_t* water, float* tmp, Eigen::Array3i dims, int trunc) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z + threadIdx.z;
if (!IS_IN_RANGE3(x, y, z))
return;
uint8_t v = INDEX3(water, x, y, z, dims);
if (v == ID_SOLID) {
INDEX3(tmp, x, y, z, dims) = -1e3;
} else if (v == ID_WALL) {
INDEX3(tmp, x, y, z, dims) = 0;
} else if (v == ID_WATER) {
INDEX3(tmp, x, y, z, dims) = 1e3;
} else {
assert(0);
}
}
__device__ int min_neightbor(float* data, int x, int y, int z, Eigen::Array3i dims) {
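// smallest absolute value among the 26 surrounding cells; out-of-range neighbours contribute 1e3 (see OFFSET3)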
float v[26] = {};
v[0] = OFFSET3(-1, -1, -1);
v[1] = OFFSET3(-1, -1, 0);
v[2] = OFFSET3(-1, -1, 1);
v[3] = OFFSET3(-1, 0, -1);
v[4] = OFFSET3(-1, 0, 0);
v[5] = OFFSET3(-1, 0, 1);
v[6] = OFFSET3(-1, 1, -1);
v[7] = OFFSET3(-1, 1, 0);
v[8] = OFFSET3(-1, 1, 1);
v[9] = OFFSET3(0, -1, -1);
v[10] = OFFSET3(0, -1, 0);
v[11] = OFFSET3(0, -1, 1);
v[12] = OFFSET3(0, 0, -1);
v[13] = OFFSET3(0, 0, 1);
v[14] = OFFSET3(0, 1, -1);
v[15] = OFFSET3(0, 1, 0);
v[16] = OFFSET3(0, 1, 1);
v[17] = OFFSET3(1, -1, -1);
v[18] = OFFSET3(1, -1, 0);
v[19] = OFFSET3(1, -1, 1);
v[20] = OFFSET3(1, 0, -1);
v[21] = OFFSET3(1, 0, 0);
v[22] = OFFSET3(1, 0, 1);
v[23] = OFFSET3(1, 1, -1);
v[24] = OFFSET3(1, 1, 0);
v[25] = OFFSET3(1, 1, 1);
float vmin = 1e3;
for (int i =0; i < 26; i++) {
float vi = std::abs(v[i]);
vmin = vi < vmin ? vi : vmin;
}
return vmin;
}
template <typename T> __device__ int sign(T val) {
return (T(0) < val) - (val < T(0));
}
template __device__ int sign<float>(float);
template __device__ int sign<int>(int);
__global__ void thicken_kernel(float* sdf, float* tmp, Eigen::Array3i dims, int trunc) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z + threadIdx.z;
if (!IS_IN_RANGE3(x, y, z))
return;
auto v = INDEX3(sdf, x, y, z, dims);
//INDEX3(tmp, x, y, z, dims) = trunc;
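// grow the signed distance field outward: new magnitude = smallest neighbouring magnitude + 1,
// keeping the sign of the current cell (wall cells stay at 0 because sign(0) == 0)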
auto vmin = min_neightbor(sdf, x, y, z, dims);
auto s = sign(v);
INDEX3(tmp, x, y, z, dims) = s*(vmin+ 1.0);
}
| 186692c47e4ef549f8723696254f09ee1b81e036.cu | #include <cuda_runtime_api.h>
#include <vector>
#include <cmath>
#include "sdf.cuh"
#include <eigen3/Eigen/Dense>
#define INDEX3(data, x, y, z, dims) (data[(z)*dims(1)*dims(0) + (y)*dims(0) + (x)])
#define IS_IN_RANGE3(x, y, z) ((x) >= 0 && (x) < dims(0) && (y) >= 0 && (y) < dims(1) && (z) >= 0 && (z) < dims(2))
#define OFFSET3(dx, dy, dz) (IS_IN_RANGE3((x + dx), (y + dy), (z + dz)) ? \
data[(z + dz)*dims(1)*dims(0) + (y + dy)*dims(0) + (x + dx)] : 1e3)
void sdf(std::vector<uint8_t>& walls, std::vector<float>& out, Eigen::Array3i dims, int trunc) {
//// -> memory
//int size_verts = verts.size();
//float *verts_;
//cudaMalloc(&verts_, size_verts*sizeof(float));
//cudaMemcpy(verts_, verts.data(), size_verts*sizeof(int32_t), cudaMemcpyHostToDevice);
//// <-
size_t n_elems = dims(0)*dims(1)*dims(2);
assert(walls.size() == n_elems);
// -> thread blocks
Eigen::Array3i dims_padded = dims.unaryExpr([](const int x) {
int pad = 8 - x%8;
int f = x%8 > 0 ? 1 : 0;
return x + f*pad;
});
assert(dims_padded(0)%8 == 0);
assert(dims_padded(1)%8 == 0);
assert(dims_padded(2)%8 == 0);
//size_t n_triangles = faces.size();
dim3 blocks(dims_padded(0)/8, dims_padded(1)/8, dims_padded(2)/8);
dim3 threads(8, 8, 8);
// <-
// -> init
uint8_t* water;
cudaMalloc(&water, n_elems*sizeof(uint8_t));
cudaMemcpy(water, walls.data(), n_elems*sizeof(uint8_t), cudaMemcpyHostToDevice);
float *sdf;
cudaMalloc(&sdf, n_elems*sizeof(float));
float *tmp;
cudaMalloc(&tmp, n_elems*sizeof(float));
// <-
// -> init
init_kernel<<<blocks, threads>>>(water, sdf, dims, trunc);
cudaDeviceSynchronize();
// <-
// -> thicken
int n_iteration = dims.maxCoeff();
for (int i = 0; i < n_iteration; i++) {
thicken_kernel<<<blocks, threads>>>(sdf, tmp, dims, trunc);
cudaDeviceSynchronize();
cudaMemcpy(sdf, tmp, n_elems*sizeof(float), cudaMemcpyDeviceToDevice);
}
// <-
// copy data
cudaMemcpy(out.data(), sdf, n_elems*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(water);
cudaFree(sdf);
cudaFree(tmp);
// <-
};
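// Minimal usage sketch (assumed caller code, not part of this file). The grid is flattened
// x-fastest as in INDEX3 and every cell must be pre-labelled ID_SOLID / ID_WALL / ID_WATER:
// Eigen::Array3i dims(64, 64, 64);
// std::vector<uint8_t> walls(dims.prod(), ID_WATER); // fill with real labels first
// std::vector<float> out(dims.prod());
// sdf(walls, out, dims, /*trunc=*/8);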
__global__ void init_kernel(uint8_t* water, float* tmp, Eigen::Array3i dims, int trunc) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z + threadIdx.z;
if (!IS_IN_RANGE3(x, y, z))
return;
uint8_t v = INDEX3(water, x, y, z, dims);
if (v == ID_SOLID) {
INDEX3(tmp, x, y, z, dims) = -1e3;
} else if (v == ID_WALL) {
INDEX3(tmp, x, y, z, dims) = 0;
} else if (v == ID_WATER) {
INDEX3(tmp, x, y, z, dims) = 1e3;
} else {
assert(0);
}
}
__device__ float min_neightbor(float* data, int x, int y, int z, Eigen::Array3i dims) { // returns the minimum |neighbour| value (float; an int return type would truncate it)
float v[26] = {};
v[0] = OFFSET3(-1, -1, -1);
v[1] = OFFSET3(-1, -1, 0);
v[2] = OFFSET3(-1, -1, 1);
v[3] = OFFSET3(-1, 0, -1);
v[4] = OFFSET3(-1, 0, 0);
v[5] = OFFSET3(-1, 0, 1);
v[6] = OFFSET3(-1, 1, -1);
v[7] = OFFSET3(-1, 1, 0);
v[8] = OFFSET3(-1, 1, 1);
v[9] = OFFSET3(0, -1, -1);
v[10] = OFFSET3(0, -1, 0);
v[11] = OFFSET3(0, -1, 1);
v[12] = OFFSET3(0, 0, -1);
v[13] = OFFSET3(0, 0, 1);
v[14] = OFFSET3(0, 1, -1);
v[15] = OFFSET3(0, 1, 0);
v[16] = OFFSET3(0, 1, 1);
v[17] = OFFSET3(1, -1, -1);
v[18] = OFFSET3(1, -1, 0);
v[19] = OFFSET3(1, -1, 1);
v[20] = OFFSET3(1, 0, -1);
v[21] = OFFSET3(1, 0, 0);
v[22] = OFFSET3(1, 0, 1);
v[23] = OFFSET3(1, 1, -1);
v[24] = OFFSET3(1, 1, 0);
v[25] = OFFSET3(1, 1, 1);
float vmin = 1e3;
for (int i =0; i < 26; i++) {
float vi = std::abs(v[i]);
vmin = vi < vmin ? vi : vmin;
}
return vmin;
}
template <typename T> __device__ int sign(T val) {
return (T(0) < val) - (val < T(0));
}
template __device__ int sign<float>(float);
template __device__ int sign<int>(int);
__global__ void thicken_kernel(float* sdf, float* tmp, Eigen::Array3i dims, int trunc) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z + threadIdx.z;
if (!IS_IN_RANGE3(x, y, z))
return;
auto v = INDEX3(sdf, x, y, z, dims);
//INDEX3(tmp, x, y, z, dims) = trunc;
auto vmin = min_neightbor(sdf, x, y, z, dims);
auto s = sign(v);
INDEX3(tmp, x, y, z, dims) = s*(vmin+ 1.0);
}
|
e05067dfc354a7df5dd26be2f61e0637cc9f6cbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ static int ros_Integrator_ros3(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T,
// Integration parameters
const int autonomous, const int vectorTol, const int Max_no_steps,
const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit,
const double FacMin, const double FacMax, const double FacRej, const double FacSafe,
// Status parameters
int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng,
// cuda global mem buffers
const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0,
double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// VL_GLO
const int VL_GLO,
const int offset)
{
int index = blockIdx.x*blockDim.x+threadIdx.x+offset;
double H, Hnew, HC, HC0,HC1, HG, Fac; // Tau - not used
double Err; //*varErr;
int direction;
int rejectLastH, rejectMoreH;
const double DELTAMIN = 1.0E-5;
const int ros_S = 3;
// ~~~> Initial preparations
T = Tstart;
Hexit = 0.0;
H = fmin(Hstart,Hmax);
if (fabs(H) <= 10.0*roundoff)
H = DELTAMIN;
if (Tend >= Tstart)
{
direction = + 1;
}
else
{
direction = - 1;
}
rejectLastH=0;
rejectMoreH=0;
// TimeLoop:
while((direction > 0) && ((T- Tend)+ roundoff <= ZERO) || (direction < 0) && ((Tend-T)+ roundoff <= ZERO))
{
if (Nstp > Max_no_steps) // Too many steps
return -6;
// Step size too small
if (H <= roundoff){ // Step size too small
//if (((T+ 0.1*H) == T) || (H <= roundoff)) {
return -7;
}
// ~~~> Limit H if necessary to avoid going beyond Tend
Hexit = H;
H = fmin(H,fabs(Tend-T));
// ~~~> Compute the function at current time
Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO);
// ~~~> Compute the function derivative with respect to T
if (!autonomous)
ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read
// ~~~> Compute the Jacobian at current time
Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ
// ~~~> Repeat step calculation until current step accepted
// UntilAccepted:
while(1)
{
ros_PrepareMatrix(H, direction, 0.43586652150845899941601945119356E+00 , jac0, Ghimj, Nsng, Ndec, VL_GLO);
{ // istage=0
for (int i=0; i<NVAR; i++){
K(index,0,i) = Fcn0(index,i); // FCN0 Read
}
if ((!autonomous))
{
HG = direction*H*0.43586652150845899941601945119356E+00;
for (int i=0; i<NVAR; i++){
K(index,0,i) += dFdT(index,i)*HG;
}
}
ros_Solve(Ghimj, K, Nsol, 0, ros_S);
} // Stage
{ // istage = 1
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,0,i) + var(index,i);
}
Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap
HC = -0.10156171083877702091975600115545E+01/(direction*H);
for (int i=0; i<NVAR; i++){
double tmp = K(index,0,i);
K(index,1,i) = tmp*HC + varNew(index,i);
}
if ((!autonomous))
{
HG = direction*H*0.24291996454816804366592249683314E+00;
for (int i=0; i<NVAR; i++){
K(index,1,i) += dFdT(index,i)*HG;
}
}
// R ,RW, RW, R, R
ros_Solve(Ghimj, K, Nsol, 1, ros_S);
} // Stage
{
int istage = 2;
HC0 = 0.40759956452537699824805835358067E+01/(direction*H);
HC1 = 0.92076794298330791242156818474003E+01/(direction*H);
for (int i=0; i<NVAR; i++){
K(index,2,i) = K(index,1,i)*HC1 + K(index,0,i)*HC0 + varNew(index,i);
}
if ((!autonomous) )
{
HG = direction*H*0.21851380027664058511513169485832E+01;
for (int i=0; i<NVAR; i++){
K(index,istage,i) += dFdT(index,i)*HG;
}
}
ros_Solve(Ghimj, K, Nsol, istage, ros_S);
} // Stage
// ~~~> Compute the new solution
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,0,i) + K(index,1,i)*0.61697947043828245592553615689730E+01 + K(index,2,i)*(-0.42772256543218573326238373806514) + var(index,i) ;
varErr(index,i) = K(index,0,i)/2 + K(index,1,i)*(-0.29079558716805469821718236208017E+01) + K(index,2,i)*(0.22354069897811569627360909276199);
}
Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol);
// ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax
Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/3.0)));
Hnew = H*Fac;
// ~~~> Check the error magnitude and adjust step size
Nstp = Nstp+ 1;
if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step
{
Nacc = Nacc + 1;
for (int j=0; j<NVAR ; j++)
var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read
T = T + direction*H;
Hnew = fmax(Hmin,fmin(Hnew,Hmax));
if (rejectLastH) // No step size increase after a rejected step
Hnew = fmin(Hnew,H);
rejectLastH = 0;
rejectMoreH = 0;
H = Hnew;
break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED
}
else // ~~~> Reject step
{
if (rejectMoreH)
Hnew = H*FacRej;
rejectMoreH = rejectLastH;
rejectLastH = 1;
H = Hnew;
if (Nacc >= 1)
Nrej += 1;
} // Err <= 1
} // UntilAccepted
} // TimeLoop
// ~~~> Successful exit
return 0; // ~~~> The integration was successful
}
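// Summary of ros_Integrator_ros3 above: one 3-stage ROS3 Rosenbrock step prepares a single matrix
// via ros_PrepareMatrix, performs three ros_Solve calls for the stage vectors K, combines them into
// the new solution varNew plus an embedded lower-order error estimate varErr, and controls the step
// size with Hnew = H * min(FacMax, max(FacMin, FacSafe / Err^(1/3))).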
__global__
void Rosenbrock_ros3(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus,
const int autonomous, const int vectorTol, const int UplimTol, const int Max_no_steps,
double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst,
const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff,
const double * __restrict__ absTol, const double * __restrict__ relTol,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO, const int offset)
{
int index = blockIdx.x*blockDim.x+threadIdx.x + offset;
/*
* In theory someone can aggregate accesses together,
* however due to algorithm, threads access
* different parts of memory, making it harder to
* optimize accesses.
*
*/
double *Ghimj = &d_Ghimj[index*LU_NONZERO];
double *K = &d_K[index*NVAR*3];
double *varNew = &d_varNew[index*NVAR];
double *Fcn0 = &d_Fcn0[index*NVAR];
double *dFdT = &d_dFdT[index*NVAR];
double *jac0 = &d_jac0[index*LU_NONZERO];
double *varErr = &d_varErr[index*NVAR];
double *var = &d_var[index*NSPEC];
double *fix = &d_fix[index*NFIX];
double *rconst = &d_rconst[index*NREACT];
const int method = 2;
if (index < VL_GLO)
{
int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng;
double Texit, Hexit;
Nfun = 0;
Njac = 0;
Nstp = 0;
Nacc = 0;
Nrej = 0;
Ndec = 0;
Nsol = 0;
Nsng = 0;
/* Copy data from global memory to temporary array */
/*
* Optimization note: if we ever have enough constant
* memory, we could use it for storing the data.
* In current architectures if we use constant memory
* only a few threads will be able to run on the fly.
*
*/
for (int i=0; i<NSPEC; i++)
var(index,i) = conc(index,i);
for (int i=0; i<NFIX; i++)
fix(index,i) = conc(index,NVAR+i);
update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO, offset);
ros_Integrator_ros3(var, fix, Tstart, Tend, Texit,
// Integration parameters
autonomous, vectorTol, Max_no_steps,
roundoff, Hmin, Hmax, Hstart, Hexit,
FacMin, FacMax, FacRej, FacSafe,
// Status parameters
Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng,
// cuda global mem buffers
rconst, absTol, relTol, varNew, Fcn0,
K, dFdT, jac0, Ghimj, varErr,
// For update rconst
khet_st, khet_tr, jx,
VL_GLO, offset
);
for (int i=0; i<NVAR; i++)
conc(index,i) = var(index,i);
/* Statistics */
istatus(index,ifun) = Nfun;
istatus(index,ijac) = Njac;
istatus(index,istp) = Nstp;
istatus(index,iacc) = Nacc;
istatus(index,irej) = Nrej;
istatus(index,idec) = Ndec;
istatus(index,isol) = Nsol;
istatus(index,isng) = Nsng;
// Last T and H
rstatus(index,itexit) = Texit;
rstatus(index,ihexit) = Hexit;
}
}
| e05067dfc354a7df5dd26be2f61e0637cc9f6cbd.cu |
__device__ static int ros_Integrator_ros3(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T,
// Integration parameters
const int autonomous, const int vectorTol, const int Max_no_steps,
const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit,
const double FacMin, const double FacMax, const double FacRej, const double FacSafe,
// Status parameters
int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng,
// cuda global mem buffers
const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0,
double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// VL_GLO
const int VL_GLO,
const int offset)
{
int index = blockIdx.x*blockDim.x+threadIdx.x+offset;
double H, Hnew, HC, HC0,HC1, HG, Fac; // Tau - not used
double Err; //*varErr;
int direction;
int rejectLastH, rejectMoreH;
const double DELTAMIN = 1.0E-5;
const int ros_S = 3;
// ~~~> Initial preparations
T = Tstart;
Hexit = 0.0;
H = fmin(Hstart,Hmax);
if (fabs(H) <= 10.0*roundoff)
H = DELTAMIN;
if (Tend >= Tstart)
{
direction = + 1;
}
else
{
direction = - 1;
}
rejectLastH=0;
rejectMoreH=0;
// TimeLoop:
while((direction > 0) && ((T- Tend)+ roundoff <= ZERO) || (direction < 0) && ((Tend-T)+ roundoff <= ZERO))
{
if (Nstp > Max_no_steps) // Too many steps
return -6;
// Step size too small
if (H <= roundoff){ // Step size too small
//if (((T+ 0.1*H) == T) || (H <= roundoff)) {
return -7;
}
// ~~~> Limit H if necessary to avoid going beyond Tend
Hexit = H;
H = fmin(H,fabs(Tend-T));
// ~~~> Compute the function at current time
Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO);
// ~~~> Compute the function derivative with respect to T
if (!autonomous)
ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read
// ~~~> Compute the Jacobian at current time
Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ
// ~~~> Repeat step calculation until current step accepted
// UntilAccepted:
while(1)
{
ros_PrepareMatrix(H, direction, 0.43586652150845899941601945119356E+00 , jac0, Ghimj, Nsng, Ndec, VL_GLO);
{ // istage=0
for (int i=0; i<NVAR; i++){
K(index,0,i) = Fcn0(index,i); // FCN0 Read
}
if ((!autonomous))
{
HG = direction*H*0.43586652150845899941601945119356E+00;
for (int i=0; i<NVAR; i++){
K(index,0,i) += dFdT(index,i)*HG;
}
}
ros_Solve(Ghimj, K, Nsol, 0, ros_S);
} // Stage
{ // istage = 1
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,0,i) + var(index,i);
}
Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap
HC = -0.10156171083877702091975600115545E+01/(direction*H);
for (int i=0; i<NVAR; i++){
double tmp = K(index,0,i);
K(index,1,i) = tmp*HC + varNew(index,i);
}
if ((!autonomous))
{
HG = direction*H*0.24291996454816804366592249683314E+00;
for (int i=0; i<NVAR; i++){
K(index,1,i) += dFdT(index,i)*HG;
}
}
// R ,RW, RW, R, R
ros_Solve(Ghimj, K, Nsol, 1, ros_S);
} // Stage
{
int istage = 2;
HC0 = 0.40759956452537699824805835358067E+01/(direction*H);
HC1 = 0.92076794298330791242156818474003E+01/(direction*H);
for (int i=0; i<NVAR; i++){
K(index,2,i) = K(index,1,i)*HC1 + K(index,0,i)*HC0 + varNew(index,i);
}
if ((!autonomous) )
{
HG = direction*H*0.21851380027664058511513169485832E+01;
for (int i=0; i<NVAR; i++){
K(index,istage,i) += dFdT(index,i)*HG;
}
}
ros_Solve(Ghimj, K, Nsol, istage, ros_S);
} // Stage
// ~~~> Compute the new solution
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,0,i) + K(index,1,i)*0.61697947043828245592553615689730E+01 + K(index,2,i)*(-0.42772256543218573326238373806514) + var(index,i) ;
varErr(index,i) = K(index,0,i)/2 + K(index,1,i)*(-0.29079558716805469821718236208017E+01) + K(index,2,i)*(0.22354069897811569627360909276199);
}
Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol);
// ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax
Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/3.0)));
Hnew = H*Fac;
// ~~~> Check the error magnitude and adjust step size
Nstp = Nstp+ 1;
if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step
{
Nacc = Nacc + 1;
for (int j=0; j<NVAR ; j++)
var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read
T = T + direction*H;
Hnew = fmax(Hmin,fmin(Hnew,Hmax));
if (rejectLastH) // No step size increase after a rejected step
Hnew = fmin(Hnew,H);
rejectLastH = 0;
rejectMoreH = 0;
H = Hnew;
break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED
}
else // ~~~> Reject step
{
if (rejectMoreH)
Hnew = H*FacRej;
rejectMoreH = rejectLastH;
rejectLastH = 1;
H = Hnew;
if (Nacc >= 1)
Nrej += 1;
} // Err <= 1
} // UntilAccepted
} // TimeLoop
// ~~~> Successful exit
return 0; // ~~~> The integration was successful
}
__global__
void Rosenbrock_ros3(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus,
const int autonomous, const int vectorTol, const int UplimTol, const int Max_no_steps,
double * __restrict__ d_jac0, double * __restrict__ d_Ghimj, double * __restrict__ d_varNew, double * __restrict__ d_K, double * __restrict__ d_varErr,double * __restrict__ d_dFdT ,double * __restrict__ d_Fcn0, double * __restrict__ d_var, double * __restrict__ d_fix, double * __restrict__ d_rconst,
const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff,
const double * __restrict__ absTol, const double * __restrict__ relTol,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO, const int offset)
{
int index = blockIdx.x*blockDim.x+threadIdx.x + offset;
/*
* In theory someone can aggregate accesses together,
* however due to algorithm, threads access
* different parts of memory, making it harder to
* optimize accesses.
*
*/
double *Ghimj = &d_Ghimj[index*LU_NONZERO];
double *K = &d_K[index*NVAR*3];
double *varNew = &d_varNew[index*NVAR];
double *Fcn0 = &d_Fcn0[index*NVAR];
double *dFdT = &d_dFdT[index*NVAR];
double *jac0 = &d_jac0[index*LU_NONZERO];
double *varErr = &d_varErr[index*NVAR];
double *var = &d_var[index*NSPEC];
double *fix = &d_fix[index*NFIX];
double *rconst = &d_rconst[index*NREACT];
const int method = 2;
if (index < VL_GLO)
{
int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng;
double Texit, Hexit;
Nfun = 0;
Njac = 0;
Nstp = 0;
Nacc = 0;
Nrej = 0;
Ndec = 0;
Nsol = 0;
Nsng = 0;
/* Copy data from global memory to temporary array */
/*
* Optimization note: if we ever have enough constant
* memory, we could use it for storing the data.
* In current architectures if we use constant memory
* only a few threads will be able to run on the fly.
*
*/
for (int i=0; i<NSPEC; i++)
var(index,i) = conc(index,i);
for (int i=0; i<NFIX; i++)
fix(index,i) = conc(index,NVAR+i);
update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO, offset);
ros_Integrator_ros3(var, fix, Tstart, Tend, Texit,
// Integration parameters
autonomous, vectorTol, Max_no_steps,
roundoff, Hmin, Hmax, Hstart, Hexit,
FacMin, FacMax, FacRej, FacSafe,
// Status parameters
Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng,
// cuda global mem buffers
rconst, absTol, relTol, varNew, Fcn0,
K, dFdT, jac0, Ghimj, varErr,
// For update rconst
khet_st, khet_tr, jx,
VL_GLO, offset
);
for (int i=0; i<NVAR; i++)
conc(index,i) = var(index,i);
/* Statistics */
istatus(index,ifun) = Nfun;
istatus(index,ijac) = Njac;
istatus(index,istp) = Nstp;
istatus(index,iacc) = Nacc;
istatus(index,irej) = Nrej;
istatus(index,idec) = Ndec;
istatus(index,isol) = Nsol;
istatus(index,isng) = Nsng;
// Last T and H
rstatus(index,itexit) = Texit;
rstatus(index,ihexit) = Hexit;
}
}
|
3fa5b897022a88b44b726543724e2d89b5806db3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUDA_LAUNCH_BLOCKING 1
#include "ctprofileevaluation.cuh"
#include <algorithm>
#include "stdio.h"
namespace imt
{
namespace volume
{
namespace cuda
{
static void HandleError(hipError_t err,
const char *file,
int line) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
struct ProfileEvaluatorData {
char data[256];
};
struct KernelData
{
float data[48];
};
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//We serialize a single profile evaluator into a char array and deserialize it into the device-side profile evaluator. The advantage is
// that all of its parameters remain constant for a profile evaluator, so we do not need to reinitialize it for every thread; we can simply copy it once.
__device__ __constant__ ProfileEvaluatorData profileEvaluatorData; //char profileEvaluatorData[256];
__device__ __constant__ KernelData constGaussKernelData , constCannyKernelData , constSecDerKernelData;
//these kernels stay constant during gradient based extrema estimation
__device__ __constant__ float gaussPreEvaluatedKernel[43], cannyPreEvaluatedKernel[43], secDerPreEvaluatedKernel[43];
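// The host is expected to fill these symbols before launching any kernel; a minimal sketch
// (assumed usage, not part of this file):
// hipMemcpyToSymbol(HIP_SYMBOL(profileEvaluatorData), &hostEvaluatorCopy, sizeof(ProfileEvaluatorData));
// hipMemcpyToSymbol(HIP_SYMBOL(gaussPreEvaluatedKernel), hostGaussKernel, 43 * sizeof(float));
// The ipps* helpers below are device-side stand-ins for the Intel IPP primitives used by the CPU code.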
__device__ void ippsConvert_16u32f(unsigned short* pSrc, float* pDst, int len)
{
for (int ll = 0; ll < len; ll++)
{
pDst[ll] = pSrc[ll];
}
}
__device__ void ippsSet_16s(short value, short* arr, int len)
{
for (int ll = 0; ll < len; ll++)
{
arr[ll] = value;
}
}
__device__ void ippsNorm_L2_32f(float* arr, int len, float* norm)
{
*norm = 0;
for (int ll = 0; ll < len; ll++)
{
*norm += arr[ll] * arr[ll];
}
*norm = sqrtf(*norm);
}
__device__ void ippsSqr_32f_I(float* coeffs, int length)
{
for (int ii = 0; ii < length; ii++)
{
coeffs[ii] = coeffs[ii] * coeffs[ii];
}
}
__device__ void ippsDivC_32f_I(float denom, float* arr, int length)
{
float invDenom = 1.0f / denom;
for (int ii = 0; ii < length; ii++)
{
arr[ii] *= invDenom; ///= denom; //can use fast inbuilt division function
}
}
__device__ void ippsExp_32f_I(float* arr, int length)
{
for (int ii = 0; ii < length; ii++)
{
arr[ii] = expf(arr[ii]);
}
}
__device__ void ippsCopy_32f(float *src, float* dst, int len)
{
memcpy(dst, src, len * sizeof(float));
//for (int ll = 0; ll < len; ll++)
//{
// dst[ll] = src[ll];
//}
}
__device__ void ippsCopy_32f(unsigned short *src, float* dst, int len)
{
for (int ii = 0; ii < len; ii++)
{
dst[ii] = src[ii];
}
//memcpy(dst, src, len * sizeof(float));
}
__device__ void ippsMul_32f_I(const float* pSrc, float* pSrcDst, int len)
{
for (int ii = 0; ii < len; ii++)
{
pSrcDst[ii] = pSrcDst[ii] * pSrc[ii];
}
}
__device__ void ippsAddC_32f_I(float val, float *srcDst, int length)
{
for (int ll = 0; ll < length; ll++)
{
srcDst[ll] += val;
}
}
__device__ int fillGaussCoeffsCUDA(float* gaussCoeffs, float shoch2, int length, float* tempVector)
{
ippsSqr_32f_I(gaussCoeffs, length);
ippsDivC_32f_I(-2.0f * shoch2, gaussCoeffs, length);
ippsExp_32f_I(gaussCoeffs, length);
return 0;
}
__device__ int fillCoeffsCannyCUDA(float* gaussCoeffs, float shoch2, int length, float* tempVector)
{
ippsSqr_32f_I(gaussCoeffs, length);
ippsDivC_32f_I(-2.0f * shoch2, gaussCoeffs, length);
ippsExp_32f_I(gaussCoeffs, length);
return 0;
}
__device__ int fillCannyCoeffsCUDA(float* cannyCoeffs, float shoch2, int length, float* t)
{
ippsCopy_32f(cannyCoeffs, t, length);
ippsSqr_32f_I(cannyCoeffs, length);
ippsDivC_32f_I(-2.0f*shoch2, cannyCoeffs, length);
ippsExp_32f_I(cannyCoeffs, length);
ippsDivC_32f_I(-shoch2, cannyCoeffs, length);
ippsMul_32f_I(t, cannyCoeffs, length);
return 0;
}
__device__ int fillSecDerCoeffsCUDA(float* secDerCoeffs, float shoch2, int length, float* t)
{
/*if (!t)
{
throw "Memory allocation failed";
}*/
ippsSqr_32f_I(secDerCoeffs, length);
ippsDivC_32f_I(-2.0f*shoch2, secDerCoeffs, length);
ippsCopy_32f(secDerCoeffs, t, length);
ippsExp_32f_I(secDerCoeffs, length);
ippsAddC_32f_I(0.5f, t, length);
ippsMul_32f_I(t, secDerCoeffs, length);
ippsDivC_32f_I(-0.5f*shoch2, secDerCoeffs, length);
return 0;
}
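// The fill*CoeffsCUDA helpers above turn a vector of offsets t into a Gaussian kernel and its
// derivatives (shoch2 is sigma^2):
//   fillGaussCoeffsCUDA  : g(t)   = exp(-t^2 / (2*sigma^2))
//   fillCannyCoeffsCUDA  : g'(t)  = -t / sigma^2 * exp(-t^2 / (2*sigma^2))
//   fillSecDerCoeffsCUDA : g''(t) = (t^2 / sigma^2 - 1) / sigma^2 * exp(-t^2 / (2*sigma^2))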
__device__ void ippsDotProd_32f(float* src1, float* src2, int len, float* result)
{
for (int ll = 0; ll < len; ll++)
{
*result += src1[ll] * src2[ll];
}
}
__device__ void ippsDotProd_32f(unsigned short* src1, float* src2, int len, float* result)
{
for (int ll = 0; ll < len; ll++)
{
*result += src1[ll] * src2[ll];
}
}
__device__ void ippsDotProd_32f(unsigned short* src1, unsigned short* src2, int len, float* result)
{
for (int ll = 0; ll < len; ll++)
{
*result += src1[ll] * src2[ll];
}
}
__device__ void ippsSub_32f_I(float* pSrc, float* pSrcDst, int length)
{
for (int ll = 0; ll < length; ll++)
{
pSrcDst[ll] -= pSrc[ll];
}
}
__device__ void ippsSub_32f_I(unsigned short* pSrc, float* pSrcDst, int length)
{
for (int ll = 0; ll < length; ll++)
{
pSrcDst[ll] -= pSrc[ll];
}
}
__device__ void ippsConv_32f(const float* pSrc1, int src1Len, const float* pSrc2, int src2Len, float* pDst)
{
int dstLen = src1Len + src2Len - 1;
for (int ll = 0; ll < dstLen; ll++)
{
float conv = 0;
int start = __max(0, ll - src2Len + 1);
int end = __min(ll, src1Len - 1);
for (int kk = start; kk <= end; kk++)
{
//int p = ll - kk;
conv += pSrc1[kk] * pSrc2[ll - kk];
}
pDst[ll] = conv;
}
}
__device__ void ippsConv_32f(const unsigned short* pSrc1, int src1Len, const float* pSrc2, int src2Len, float* pDst)
{
int dstLen = src1Len + src2Len - 1;
for (int ll = 0; ll < dstLen; ll++)
{
float conv = 0;
int start = __max(0, ll - src2Len + 1);
int end = __min(ll, src1Len - 1);
for (int kk = start; kk <= end; kk++)
{
//int p = ll - kk;
conv += pSrc1[kk] * pSrc2[ll - kk];
}
pDst[ll] = conv;
}
}
//pSrcDst[n] = pSrcDst[n] + pSrc[n]*val, 0 <= n < len
__device__ void ippsAddProductC_32f(const float* pSrc, const float val, float* pSrcDst, int len)
{
for (int ll = 0; ll < len; ll++)
{
pSrcDst[ll] += val * pSrc[ll];
}
}
__device__ void ippsMulC_32f_I(float val, float* pSrcDst, int length)
{
for (int ll = 0; ll < length; ll++)
{
pSrcDst[ll] *= val;
}
}
__device__ CCTProfilsEvaluationSP_Device::CCTProfilsEvaluationSP_Device()
{
voxelStep = 0.25;
//profile = NULL;
memoryAllocated = false;
length = 0;
nProfils = 0;
zeroIndex = 0;
gaussCoeffs = 0;
cannyCoeffs = 0;
secDerCoeffs = 0;
filterCoeffs = 0;
tempVector = 0;
sigma = 0.0;
threshold = 0.0;
voxelType = Void;
searchRange = 20;
searchRangeNeg = 0;
tempConvLength = 0;
tempConvProfile = 0;
results = NULL;
resCanny = NULL;
resQuality = NULL;
ptValid = NULL;
rangeFactor = 3.5;
nValid = 0;
}
// __device__ void CCTProfilsEvaluationSP_Device::Init()
//{
// //assert(sigma > 0.4);
// dynSigma = sigma;
// shoch2 = dynSigma * dynSigma;
// gaussCoeffs = 0;
// cannyCoeffs = 0;
// secDerCoeffs = 0;
// filterCoeffs = 0;
// tempVector = 0;
// searchRangeNeg = searchRange;
// dynThresholdControl = false;
// dynThreshold = threshold;
// tempConvProfile = 0;
// tempConvLength = 0;
// coeffLength = 0;
// PreCalc();
// firstValid = -1;
// lastValid = -1;
// results = NULL;
// resCanny = NULL;
// resQuality = NULL;
// ptValid = NULL;
// nAngle = 0;
// rangeFactor = 3.5;
// nValid = 0;
//}
__device__ CCTProfilsEvaluationSP_Device::~CCTProfilsEvaluationSP_Device(void)
{
//delete[] gaussCoeffs;
//delete[] cannyCoeffs;
//delete[] secDerCoeffs;
//delete[] filterCoeffs;
//delete[] tempVector;
////ZiTrace("del tempConvProfile Destruktor: %x alte Lnge: %d\n",tempConvProfile,tempConvLength);
//delete[] tempConvProfile;
//if (memoryAllocated) delete[] profile;
//delete[] results;
//delete[] resCanny;
//delete[] resQuality;
//delete[] ptValid;
}
// Set the negative search range independently of the positive one
//__device__ void CCTProfilsEvaluationSP_Device::SetSearchRangeNeg(float srNeg)
// {
// if (srNeg == 0.0)
// {
// searchRangeNeg = searchRange;
// }
// else
// {
// searchRangeNeg = (int)ceil(srNeg / voxelStep);
// }
// }
// // Set the search range
//__device__ void CCTProfilsEvaluationSP_Device::SetSearchRange(float sr)
// {
// searchRange = (int)ceil(sr / voxelStep);
// }
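// Derivatives() below evaluates a filtered derivative at a sub-voxel position x: it builds the
// offsets of a window of half-width rangeFactor*dynSigma around x, lets the callback turn those
// offsets into kernel weights, and takes the dot product with the differences
// profile[i] - profile[i+1] of the gray-value profile.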
__device__ float Derivatives(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U,
float* filterCoeffs, float* tempVector , const float& dynSigma , const float& shoch2, int(*callback)(float*, float, int, float*))
{
//assert(sigma > 0.0);
int actFilterLength = int(p.rangeFactor * dynSigma / p.voxelStep);
//std::cout << "act filter length : " << actFilterLength<<" "<<dynSigma << std::endl;
//assert(actFilterLength <= coeffLength);
int filterIndex = int(floor(x / p.voxelStep)) + p.zeroIndex - actFilterLength; // index of the start of the filter mask
//assert(filterIndex >= 0 && filterIndex + 2 * actFilterLength + 1 < length);
filterCoeffs[0] = (float)((filterIndex - p.zeroIndex + 0.5) * p.voxelStep - x);
for (int ii = 1; ii < 2 * actFilterLength + 1; ii++)
{
filterCoeffs[ii] = filterCoeffs[ii - 1] + (float)p.voxelStep;
//printf("%f ", filterCoeffs[ii]);
}
callback(filterCoeffs, shoch2, 2 * actFilterLength, tempVector);
auto dat = profile_16U + filterIndex;
ippsCopy_32f(profile_16U + filterIndex, tempVector, 2 * actFilterLength + 1);
ippsSub_32f_I(profile_16U + filterIndex + 1, tempVector, 2 * actFilterLength + 1);
float result = 0;
ippsDotProd_32f(tempVector, filterCoeffs, 2 * actFilterLength, &result);
return -result;
}
// Gaussian-filtered value
__device__ float CCTProfilsEvaluationSP_Device::Gauss(float x, int iProfil)
{
actFilterLength = int(rangeFactor * dynSigma / voxelStep);
//assert(actFilterLength <= coeffLength);
filterIndex = int(floor(x / voxelStep)) + zeroIndex - actFilterLength; // index of the start of the filter mask
if (x / voxelStep - floor(x / voxelStep) > 0.5)
filterIndex++;
//assert(filterIndex >= 0 && filterIndex + 2 * actFilterLength < length);
filterCoeffs[0] = (float)((filterIndex - zeroIndex) * voxelStep - x);
for (ii = 1; ii < 2 * actFilterLength + 1; ii++)
{
filterCoeffs[ii] = filterCoeffs[ii - 1] + (float)voxelStep;
}
fillGaussCoeffsCUDA(filterCoeffs, shoch2, 2 * actFilterLength + 1, tempVector);
result = 0;
ippsDotProd_32f(profile_16U + iProfil * length + filterIndex, filterCoeffs, 2 * actFilterLength + 1, &result);
return voxelStep * result / dynSigma / sqrtf(2 * M_PI);
}
__device__ float Gauss(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U, float* filterCoeffs, float* tempVector, const float& dynSigma , const float& shoch2 )
{
int actFilterLength = int( p.rangeFactor * dynSigma / p.voxelStep);
//assert(actFilterLength <= coeffLength);
int filterIndex = int(floor(x / p.voxelStep)) + p.zeroIndex - actFilterLength; // index of the start of the filter mask
if (x / p.voxelStep - floor(x / p.voxelStep) > 0.5)
filterIndex++;
//assert(filterIndex >= 0 && filterIndex + 2 * actFilterLength < length);
filterCoeffs[0] = (float)((filterIndex - p.zeroIndex) * p.voxelStep - x);
for ( int ii = 1; ii < 2 * actFilterLength + 1; ii++)
{
filterCoeffs[ii] = filterCoeffs[ii - 1] + (float)p.voxelStep;
}
fillGaussCoeffsCUDA(filterCoeffs, shoch2, 2 * actFilterLength + 1, tempVector);
float result = 0;
ippsDotProd_32f( profile_16U + filterIndex, filterCoeffs, 2 * actFilterLength + 1, &result);
return p.voxelStep * result / dynSigma / sqrtf(2 * M_PI);
}
// First filtered derivative - Canny
__device__ float CCTProfilsEvaluationSP_Device::Canny(float x, int iProfil)
{
//printf("[canny start gpu]\n");
float c = Derivatives(x, iProfil, &fillGaussCoeffsCUDA);
//printf("[Canny output %f]\n", c);
return c;
}
__device__ float Canny(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U, float* filterCoeffs, float* tempVector, const float& dynSigma, const float& shoch2)
{
//printf("[canny start gpu]\n");
float c = Derivatives(x, p, profile_16U, filterCoeffs, tempVector, dynSigma , shoch2 , &fillGaussCoeffsCUDA);
//printf("[Canny output %f]\n", c);
return c;
}
// Second filtered derivative - SecDer
__device__ float CCTProfilsEvaluationSP_Device::SecondDer(float x, int iProfil)
{
return Derivatives(x, iProfil, &fillCannyCoeffsCUDA);
}
__device__ float SecondDer(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U, float* filterCoeffs, float* tempVector , const float& dynSigma , const float& shoch2 )
{
return Derivatives( x, p, profile_16U, filterCoeffs, tempVector , dynSigma , shoch2 , &fillCannyCoeffsCUDA);
}
// Third filtered derivative - ThirdDer
__device__ float CCTProfilsEvaluationSP_Device::ThirdDer(float x, int iProfil)
{
return -Derivatives(x, iProfil, &fillSecDerCoeffsCUDA);
}
// Third filtered derivative - ThirdDer
__device__ float ThirdDer(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U , float* filterCoeffs , float* tempVector, const float& dynSigma, const float& shoch2)
{
return -Derivatives(x, p , profile_16U, filterCoeffs , tempVector, dynSigma , shoch2, &fillSecDerCoeffsCUDA);
}
// Basic function for filtered derivatives of the gray-value profile
__device__ float CCTProfilsEvaluationSP_Device::Derivatives(float x, int iProfil, int(*callback)(float*, float, int, float*))
{
//assert(sigma > 0.0);
actFilterLength = int(rangeFactor * dynSigma / voxelStep);
//std::cout << "act filter length : " << actFilterLength<<" "<<dynSigma << std::endl;
//assert(actFilterLength <= coeffLength);
filterIndex = int(floor(x / voxelStep)) + zeroIndex - actFilterLength; // index of the start of the filter mask
//assert(filterIndex >= 0 && filterIndex + 2 * actFilterLength + 1 < length);
filterCoeffs[0] = (float)((filterIndex - zeroIndex + 0.5)*voxelStep - x);
for (ii = 1; ii < 2 * actFilterLength + 1; ii++)
{
filterCoeffs[ii] = filterCoeffs[ii - 1] + (float)voxelStep;
//printf("%f ", filterCoeffs[ii]);
}
callback(filterCoeffs, shoch2, 2 * actFilterLength, tempVector);
auto dat = profile_16U + iProfil * length + filterIndex;
ippsCopy_32f(profile_16U + iProfil * length + filterIndex, tempVector, 2 * actFilterLength + 1);
ippsSub_32f_I(profile_16U + iProfil * length + filterIndex + 1, tempVector, 2 * actFilterLength + 1);
result = 0;
ippsDotProd_32f(tempVector, filterCoeffs, 2 * actFilterLength, &result);
return -result;
}
__device__ float CCTProfilsEvaluationSP_Device::CannyOpt(int i, int iProfil)
{
//assert(i >= coeffLength && i + coeffLength < length);
result = 0;
ippsDotProd_32f(profile_16U + iProfil * length + i - coeffLength, gaussCoeffs, 2 * coeffLength + 1, &result);
return result;
}
__device__ float CannyOpt(int i , const ProfileEvaluationConstants& p, unsigned short* profile_16U)
{
//assert(i >= coeffLength && i + coeffLength < length);
float result = 0;
ippsDotProd_32f(profile_16U + i - p.coeffLength, p.gaussCoeffs, 2 * p.coeffLength + 1, &result);
return result;
}
__device__ float CCTProfilsEvaluationSP_Device::SecDerOpt(int i, int iProfil)
{
//assert(i >= coeffLength && i + coeffLength < length);
result = 0;
ippsDotProd_32f(profile_16U + iProfil * length + i - coeffLength, cannyCoeffs, 2 * coeffLength + 1, &result);
return result;
}
__device__ float SecDerOpt(int i , const ProfileEvaluationConstants& p, unsigned short* profile_16U )
{
//assert(i >= coeffLength && i + coeffLength < length);
float result = 0;
ippsDotProd_32f( profile_16U + i - p.coeffLength , p.cannyCoeffs , 2 * p.coeffLength + 1 , &result );
return result;
}
__device__ int CCTProfilsEvaluationSP_Device::FoldCannyOpt(int iProfil, float *cannyProfile)
{
//assert(cannyProfile);
//assert(zeroIndex - searchRangeNeg >= coeffLength && zeroIndex + searchRange + coeffLength < length);
ippsConv_32f(profile_16U + iProfil * length + zeroIndex - searchRangeNeg - coeffLength, 2 * coeffLength + searchRange + searchRangeNeg + 1, gaussCoeffs, 2 * coeffLength + 1, cannyProfile);
return searchRangeNeg + 2 * coeffLength; // this is the zero index
}
__device__ int FoldCannyOpt(const ProfileEvaluationConstants& p, unsigned short* profile_16U, float *cannyProfile)
{
//assert(cannyProfile);
//assert(zeroIndex - searchRangeNeg >= coeffLength && zeroIndex + searchRange + coeffLength < length);
ippsConv_32f( profile_16U + p.zeroIndex - p.searchRangeNeg - p.coeffLength,
2 * p.coeffLength + p.searchRange + p.searchRangeNeg + 1,
p.gaussCoeffs, 2 * p.coeffLength + 1, cannyProfile);
return p.searchRangeNeg + 2 * p.coeffLength; // this is the zero index
}
__device__ int CCTProfilsEvaluationSP_Device::FoldSecDerOpt(int iProfil, float *secDerProfile)
{
//assert(secDerProfile);
//assert(zeroIndex - searchRangeNeg >= coeffLength && zeroIndex + searchRange + coeffLength <= length);
ippsConv_32f( profile_16U + iProfil * length + zeroIndex - searchRangeNeg - coeffLength ,
2 * coeffLength + searchRange + searchRangeNeg + 1, cannyCoeffs, 2 * coeffLength + 1, secDerProfile);
//printf("%d %d %d \n", zeroIndex - searchRangeNeg - coeffLength, (2 * coeffLength + searchRange + searchRangeNeg + 1), 2 * coeffLength + 1);
return searchRangeNeg + 2 * coeffLength; // this is the zero index
}
__device__ int FoldSecDerOpt( const ProfileEvaluationConstants& p, unsigned short* profile_16U , float *secDerProfile)
{
//assert(secDerProfile);
//assert(zeroIndex - searchRangeNeg >= coeffLength && zeroIndex + searchRange + coeffLength <= length);
ippsConv_32f(profile_16U + p.zeroIndex - p.searchRangeNeg - p.coeffLength,
2 * p.coeffLength + p.searchRange + p.searchRangeNeg + 1, p.cannyCoeffs, 2 * p.coeffLength + 1, secDerProfile);
//printf("%d %d %d \n", zeroIndex - searchRangeNeg - coeffLength, (2 * coeffLength + searchRange + searchRangeNeg + 1), 2 * coeffLength + 1);
return p.searchRangeNeg + 2 * p.coeffLength; // this is the zero index
}
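// The Fold*Opt routines convolve the raw profile once over the whole search window with the
// precomputed kernels (gaussCoeffs / cannyCoeffs / secDerCoeffs) and return the index that
// corresponds to x = 0 inside the folded buffer, so later per-position lookups are O(1).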
__device__ int CCTProfilsEvaluationSP_Device::FoldThirdDerOpt(int iProfil, float *thirdDerProfile, int convRangeNeg, int convRangePos)
{
//assert(thirdDerProfile);
if (!convRangeNeg || zeroIndex - convRangeNeg < coeffLength)
convRangeNeg = zeroIndex - coeffLength;
if (!convRangePos || zeroIndex + convRangePos + coeffLength >= length)
convRangePos = length - coeffLength - zeroIndex - 1;
//assert(zeroIndex - convRangeNeg >= coeffLength && zeroIndex + convRangePos + coeffLength < length);
ippsConv_32f(profile_16U + iProfil * length + zeroIndex - convRangeNeg - coeffLength,
2 * coeffLength + convRangePos + convRangeNeg + 1, secDerCoeffs,
2 * coeffLength + 1, thirdDerProfile);
return convRangeNeg + 2 * coeffLength; // this is the zero index
}
__device__ int FoldThirdDerOpt( const ProfileEvaluationConstants& p , unsigned short* profile_16U, float *thirdDerProfile, int convRangeNeg, int convRangePos)
{
//assert(thirdDerProfile);
if (!convRangeNeg || p.zeroIndex - convRangeNeg < p.coeffLength)
convRangeNeg = p.zeroIndex - p.coeffLength;
if (!convRangePos || p.zeroIndex + convRangePos + p.coeffLength >= p.length)
convRangePos = p.length - p.coeffLength - p.zeroIndex - 1;
//assert(zeroIndex - convRangeNeg >= coeffLength && zeroIndex + convRangePos + coeffLength < length);
ippsConv_32f( profile_16U + p.zeroIndex - convRangeNeg - p.coeffLength,
2 * p.coeffLength + convRangePos + convRangeNeg + 1, p.secDerCoeffs,
2 * p.coeffLength + 1, thirdDerProfile);
return convRangeNeg + 2 * p.coeffLength; // this is the zero index
}
// direct put dyn Sigma
__device__ void CCTProfilsEvaluationSP_Device::PutDynSigma(float newValue)
{
dynSigma = newValue;
shoch2 = dynSigma * dynSigma;
}
__device__ void PutDynSigma( const ProfileEvaluationConstants&p , float newValue , float& dynSigma , float& shoch2 )
{
dynSigma = newValue;
shoch2 = dynSigma * dynSigma;
}
// Limit the dynamic sigma (smaller than p.sigma and > 0.75)
__device__ bool SetDynSigma( CCTProfilsEvaluationSP_Device& p , float x, int iProfil)
{
// DPVector::const_iterator i;
float curThreshold = -0.1f*p.Canny(x, iProfil);
bool minBegrenzung = true, maxBegrenzung = true;
float minIndex = x, maxIndex = x, xx;
// Search for the negative turning point in the profile with 10% tolerance
do
{
minIndex -= p.voxelStep / 4;
} while (p.Canny(minIndex, iProfil) > curThreshold &&
(minIndex - x < 4 * p.sigma) &&
(minIndex / p.voxelStep > -p.searchRangeNeg));
// Check for a real opposite edge above 50% height
xx = minIndex;
do
{
xx -= p.voxelStep / 4;
if (x - xx > 4 * p.sigma || (xx / p.voxelStep <= -p.searchRangeNeg))
break;
} while (minBegrenzung = (p.Canny(xx, iProfil) > 5 * curThreshold));
// Search for the positive turning point in the profile with 10% tolerance
curThreshold = -0.1f*p.Canny(x, iProfil);
do
{
maxIndex += p.voxelStep / 4;
} while (p.Canny(maxIndex, iProfil) > curThreshold &&
(maxIndex - x < 4 * p.sigma) &&
(maxIndex / p.voxelStep > p.searchRange));
// Check for a real opposite edge above 50% height
xx = maxIndex;
do
{
xx += p.voxelStep / 4;
if (xx - x > 4 * p.sigma || xx / p.voxelStep >= p.searchRange)
break;
} while (maxBegrenzung = (p.Canny(xx, iProfil) > 5 * curThreshold));
// If there is an opposite edge, restrict the dynamic sigma to the distance to the turning point
// THE FACTOR 4.0 IS EXPERIMENTAL
if (!(minBegrenzung && maxBegrenzung))
p.dynSigma = (float)((maxIndex - x) < (x - minIndex) ? (maxIndex - x) : (x - minIndex)) / 4.0f;
else
{
p.dynSigma = p.sigma;
p.shoch2 = p.dynSigma* p.dynSigma;
return false;
}
// Limit the range
if (p.dynSigma > p.sigma)
{
p.dynSigma = p.sigma;
p.shoch2 = p.dynSigma* p.dynSigma;
return false;
}
if (p.dynSigma < 0.35f)
p.dynSigma = 0.35f;
p.shoch2 = p.dynSigma* p.dynSigma;
return true;
}
__device__ bool SetDynSigma(const ProfileEvaluationConstants& p, float x, unsigned short* profile_16U, float* filterBuffer, float* tempVector, float& dynSigma, float& shoch2 )
{
// DPVector::const_iterator i;
float curThreshold = -0.1f * Canny(x, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2 ); //p.Canny(x, iProfil);
bool minBegrenzung = true, maxBegrenzung = true;
float minIndex = x, maxIndex = x, xx;
// Search for the negative turning point in the profile with 10% tolerance
do
{
minIndex -= p.voxelStep / 4;
} while ( Canny(minIndex, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2) > curThreshold && //while (p.Canny(minIndex, iProfil) > curThreshold &&
(minIndex - x < 4 * p.sigma) &&
(minIndex / p.voxelStep > -p.searchRangeNeg));
// Check for a real opposite edge above 50% height
xx = minIndex;
do
{
xx -= p.voxelStep / 4;
if (x - xx > 4 * p.sigma || (xx / p.voxelStep <= -p.searchRangeNeg))
break;
} while (minBegrenzung = (Canny(xx, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2) > 5 * curThreshold));
// Search for the positive turning point in the profile with 10% tolerance
curThreshold = -0.1f*Canny(x, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2);
do
{
maxIndex += p.voxelStep / 4;
} while (Canny(maxIndex, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2) > curThreshold &&
(maxIndex - x < 4 * p.sigma) &&
(maxIndex / p.voxelStep > p.searchRange));
// Check for a real opposite edge above 50% height
xx = maxIndex;
do
{
xx += p.voxelStep / 4;
if (xx - x > 4 * p.sigma || xx / p.voxelStep >= p.searchRange)
break;
} while (maxBegrenzung = (Canny(xx, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2) > 5 * curThreshold));
// If there is an opposite edge, restrict the dynamic sigma to the distance to the turning point
// THE FACTOR 4.0 IS EXPERIMENTAL
if (!(minBegrenzung && maxBegrenzung))
dynSigma = (float)((maxIndex - x) < (x - minIndex) ? (maxIndex - x) : (x - minIndex)) / 4.0f;
else
{
dynSigma = p.sigma;
shoch2 = dynSigma * dynSigma;
return false;
}
// Limit the range
if ( dynSigma > p.sigma)
{
dynSigma = p.sigma;
shoch2 = dynSigma * dynSigma;
return false;
}
if ( dynSigma < 0.35f)
dynSigma = 0.35f;
shoch2 = dynSigma * dynSigma;
return true;
}
__device__ bool NewtonMax( CCTProfilsEvaluationSP_Device& p , float& x, int iProfil)
{
bool result = true;
float start_x = x;
float z;
int it = 0;
float lastZ;
//printf("start x : %f \n", start_x);
do
{
z = p.ThirdDer(x, iProfil);
if (z == 0) {
result = false;
break;
}
z = p.SecondDer(x, iProfil) / z; // new step size
//printf("z %f : ", z);
if (it == 0 && fabs(z) > 1.0f)
z *= 0.1f;
if (fabs(z) > 3.0f) // apparently does not converge, determined empirically
{
result = false;
break;
}
if (it > 0 && std::abs(z + lastZ) < 0.01f)
z *= 0.5f;
x = x - z; // apply the correction
//printf("%f ", x);
lastZ = z;
if (it++ > 25) // guard against endless iteration
{
result = false;
break;
}
} while (fabs(z) > 0.001); // 0.001 refers to voxel units and should be sufficient
//printf("\n");
if (!result)
x = start_x;
return result;
}
__device__ bool NewtonMax( const ProfileEvaluationConstants& p, float& x, unsigned short* profile_16U, float* filterBuffer, float* tempVector, const float& dynSigma, const float& shoch2)
{
bool result = true;
float start_x = x;
float z;
int it = 0;
float lastZ;
//printf("start x : %f \n", start_x);
do
{
z = ThirdDer(x, p , profile_16U , filterBuffer , tempVector, dynSigma, shoch2);
if (z == 0) {
result = false;
break;
}
z = SecondDer(x, p, profile_16U, filterBuffer, tempVector, dynSigma , shoch2) / z; //p.SecondDer(x, iProfil) / z; // new step size
if (it == 0 && fabs(z) > 1.0f)
z *= 0.1f;
if (fabs(z) > 3.0f) // apparently does not converge, determined empirically
{
result = false;
break;
}
if (it > 0 && std::abs(z + lastZ) < 0.01f)
z *= 0.5f;
x = x - z; // apply the correction
//printf("%f ", x);
lastZ = z;
if (it++ > 25) // guard against endless iteration
{
result = false;
break;
}
} while (fabs(z) > 0.001); // 0.001 refers to voxel units and should be sufficient
//printf("\n ", x);
if (!result)
x = start_x;
return result;
}
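// Both NewtonMax overloads above refine the edge position with Newton's method on the filtered
// second derivative: x_{k+1} = x_k - SecondDer(x_k) / ThirdDer(x_k), i.e. they walk towards the
// extremum of the Canny response; the 0.001 voxel tolerance and the 25-iteration cap guard
// against divergence.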
__device__ float GradientLength(CCTProfilsEvaluationSP_Device& p, float x, int iProfil, float* gaussLow, float* gaussHigh, float xCanny)
{
if (xCanny == 0 && p.ptValid[iProfil])
xCanny = p.resCanny[iProfil];
int sign = 1;
if (xCanny < 0) sign = -1; // downward jump (relevant for multi-material)
// Search for the parameter with 50% of xCanny (maximum value)
int iLow = (int)floor((x) / p.voxelStep);
int iBase = iLow;
while (sign * p.SecDerOpt(iLow + p.zeroIndex, iProfil) > -0.25*sign * xCanny / p.dynSigma && (iBase - iLow) * p.voxelStep <= 5.0 && (iLow + p.zeroIndex > p.coeffLength))
iLow--;
if (!((iBase - iLow)*p.voxelStep <= 5.0))
iLow = iBase - 1;
int iHigh = iBase + 1;
while (sign*p.SecDerOpt(iHigh + p.zeroIndex, iProfil) < 0.25*sign*xCanny / p.dynSigma && (iHigh - iBase)*p.voxelStep < 5.0 && (iHigh + p.zeroIndex < p.length - p.coeffLength - 1))
iHigh++;
if (!((iHigh - iBase)*p.voxelStep < 5.0))
iHigh = iBase + 1;
// Convolve the third derivative over +/- 10 voxels around x
int searchRangeRoot = int(10.0 / p.voxelStep);
int coeffDistance = int(p.coeffLength / p.voxelStep);
if (p.zeroIndex + iBase - searchRangeRoot <= coeffDistance)
searchRangeRoot = p.zeroIndex + iBase - coeffDistance;
if (p.zeroIndex + iBase + searchRangeRoot >= p.length - coeffDistance)
searchRangeRoot = searchRangeRoot - (p.zeroIndex + iBase + coeffDistance);
int foldZeroIndex = p.FoldThirdDerOpt(iProfil, p.tempConvProfile, -iBase + searchRangeRoot, iBase + searchRangeRoot);
// Search for the zero crossing of the third derivative on the air side
iHigh += foldZeroIndex;
iLow += foldZeroIndex;
iBase += foldZeroIndex;
float x_vw = 0.0, x_rw = 0.0; // hits of the forward and backward search
bool hit_vw = false, hit_rw = false; // indicators for hits above the threshold
// Loop with simultaneous forward and backward search
while (1)
{
// Test search range and sign change of the 2nd derivative
if ((iHigh - iBase) * p.voxelStep <= searchRangeRoot * p.voxelStep && sign*p.tempConvProfile[iHigh + 1] < 0 && sign*p.tempConvProfile[iHigh]>0)
{
// Interpolate the forward hit
x_vw = (iHigh + p.tempConvProfile[iHigh] / (p.tempConvProfile[iHigh] - p.tempConvProfile[iHigh + 1]) - foldZeroIndex)*p.voxelStep;
int iTest = (int)floor(x_vw / p.voxelStep + 0.5);
float t = sign * p.CannyOpt(/*iHigh - foldZeroIndex*/iTest + p.zeroIndex, iProfil);
if (t > 0.05*sign*xCanny && t<0.85*sign*xCanny && sign*p.SecDerOpt(/*iHigh - foldZeroIndex*/iTest + p.zeroIndex, iProfil)>0.15*sign*xCanny / p.dynSigma) hit_vw = true;
}
// Test search range and sign change of the 2nd derivative
if ((iBase - iLow)*p.voxelStep <= searchRangeRoot * p.voxelStep && sign*p.tempConvProfile[iLow] > 0 && sign*p.tempConvProfile[iLow - 1] < 0)
{
// Interpolate the backward hit
x_rw = (iLow - p.tempConvProfile[iLow] / (p.tempConvProfile[iLow] - p.tempConvProfile[iLow - 1]) - foldZeroIndex)*p.voxelStep;
int iTest = (int)floor(x_rw / p.voxelStep + 0.5);
float t = sign * p.CannyOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, iProfil);
if (t > 0.05*sign*xCanny && t < 0.85*sign*xCanny && sign*p.SecDerOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, iProfil) < -0.15*sign*xCanny / p.dynSigma) hit_rw = true;
}
if (hit_vw && hit_rw)
break; // both limits found
if ((iBase - iLow)*p.voxelStep >= searchRangeRoot * p.voxelStep || (iHigh - iBase)*p.voxelStep >= searchRangeRoot * p.voxelStep)
break; // search range exhausted
iHigh++; iLow--;
}
if (hit_vw && hit_rw)
{
if (sign == -1)
{
if (gaussLow) *gaussLow = p.Gauss(x_vw, iProfil);
if (gaussHigh) *gaussHigh = p.Gauss(x_rw, iProfil);
}
else
{
if (gaussLow) *gaussLow = p.Gauss(x_rw, iProfil);
if (gaussHigh) *gaussHigh = p.Gauss(x_vw, iProfil);
}
return x_vw - x_rw; // the difference between the inflection points is the characteristic we are looking for
}
else
{
if (gaussLow) *gaussLow = 0;
if (gaussHigh) *gaussHigh = 0;
return 0.0;
}
}
__device__ float GradientLength( const ProfileEvaluationConstants& p, float x, unsigned short* profile_16U , float* tempConvProfile , float* filterBuffer, float* tempVector,
bool& ptValid , float& resCanny , float* gaussLow, float* gaussHigh, float xCanny, const float& dynSigma, const float& shoch2)
{
/*if (xCanny == 0 && p.ptValid[iProfil])
xCanny = p.resCanny[iProfil];
*/
if (xCanny == 0 && ptValid)
xCanny = resCanny;
int sign = 1;
if (xCanny < 0) sign = -1; // downward jump (relevant for multi-material)
// Search for the parameter with 50% of xCanny (maximum value)
int iLow = (int)floor((x) / p.voxelStep);
int iBase = iLow;
//while (sign * p.SecDerOpt(iLow + p.zeroIndex, iProfil) > -0.25*sign * xCanny / p.dynSigma && (iBase - iLow) * p.voxelStep <= 5.0 && (iLow + p.zeroIndex > p.coeffLength))
while (sign * SecDerOpt(iLow + p.zeroIndex, p , profile_16U) > -0.25*sign * xCanny / dynSigma && (iBase - iLow) * p.voxelStep <= 5.0 && (iLow + p.zeroIndex > p.coeffLength))
iLow--;
if (!((iBase - iLow)*p.voxelStep <= 5.0))
iLow = iBase - 1;
int iHigh = iBase + 1;
//while (sign*p.SecDerOpt(iHigh + p.zeroIndex, iProfil) < 0.25*sign*xCanny / p.dynSigma && (iHigh - iBase)*p.voxelStep < 5.0 && (iHigh + p.zeroIndex < p.length - p.coeffLength - 1))
while ( sign * SecDerOpt( iHigh + p.zeroIndex, p, profile_16U ) < 0.25*sign*xCanny / dynSigma && (iHigh - iBase)*p.voxelStep < 5.0 && (iHigh + p.zeroIndex < p.length - p.coeffLength - 1))
iHigh++;
if (!((iHigh - iBase)*p.voxelStep < 5.0))
iHigh = iBase + 1;
// Convolve the third derivative over +/- 10 voxels around x
int searchRangeRoot = int(10.0 / p.voxelStep);
int coeffDistance = int(p.coeffLength / p.voxelStep);
if (p.zeroIndex + iBase - searchRangeRoot <= coeffDistance)
searchRangeRoot = p.zeroIndex + iBase - coeffDistance;
if (p.zeroIndex + iBase + searchRangeRoot >= p.length - coeffDistance)
searchRangeRoot = searchRangeRoot - (p.zeroIndex + iBase + coeffDistance);
int foldZeroIndex = FoldThirdDerOpt(p , profile_16U, tempConvProfile, -iBase + searchRangeRoot, iBase + searchRangeRoot); //p.FoldThirdDerOpt(iProfil, p.tempConvProfile, -iBase + searchRangeRoot, iBase + searchRangeRoot);
// Search for the zero crossing of the third derivative on the air side
iHigh += foldZeroIndex;
iLow += foldZeroIndex;
iBase += foldZeroIndex;
float x_vw = 0.0, x_rw = 0.0; // hits of the forward and backward search
bool hit_vw = false, hit_rw = false; // indicators for hits above the threshold
// Loop with simultaneous forward and backward search
while (1)
{
// Test search range and sign change of the 2nd derivative
if ((iHigh - iBase) * p.voxelStep <= searchRangeRoot * p.voxelStep && sign * tempConvProfile[iHigh + 1] < 0 && sign * tempConvProfile[iHigh]>0)
{
// Interpolate the forward hit
x_vw = ( iHigh + tempConvProfile[iHigh] / ( tempConvProfile[iHigh] - tempConvProfile[iHigh + 1] ) - foldZeroIndex)*p.voxelStep;
int iTest = (int)floor(x_vw / p.voxelStep + 0.5);
float t = sign * CannyOpt(iTest + p.zeroIndex, p, profile_16U); //p.CannyOpt(/*iHigh - foldZeroIndex*/iTest + p.zeroIndex, iProfil);
//if (t > 0.05*sign*xCanny && t<0.85*sign*xCanny && sign*p.SecDerOpt(/*iHigh - foldZeroIndex*/iTest + p.zeroIndex, iProfil)>0.15*sign*xCanny / p.dynSigma)
if (t > 0.05*sign*xCanny && t<0.85*sign*xCanny && sign * SecDerOpt(iTest + p.zeroIndex, p , profile_16U) > 0.15*sign*xCanny / dynSigma)
hit_vw = true;
}
// Test search range and sign change of the 2nd derivative
if ((iBase - iLow)*p.voxelStep <= searchRangeRoot * p.voxelStep && sign * tempConvProfile[iLow] > 0 && sign * tempConvProfile[iLow - 1] < 0)
{
// Interpolate the backward hit
x_rw = (iLow - tempConvProfile[iLow] / ( tempConvProfile[iLow] - tempConvProfile[iLow - 1]) - foldZeroIndex)*p.voxelStep;
int iTest = (int)floor(x_rw / p.voxelStep + 0.5);
float t = sign * CannyOpt(iTest + p.zeroIndex, p, profile_16U); //p.CannyOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, iProfil);
//if (t > 0.05*sign*xCanny && t < 0.85*sign*xCanny && sign*p.SecDerOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, iProfil) < -0.15*sign*xCanny / p.dynSigma)
if (t > 0.05*sign*xCanny && t < 0.85*sign*xCanny && sign * SecDerOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, p , profile_16U) < -0.15*sign*xCanny / dynSigma)
hit_rw = true;
}
if (hit_vw && hit_rw)
break; // both limits found
if ((iBase - iLow)*p.voxelStep >= searchRangeRoot * p.voxelStep || (iHigh - iBase)*p.voxelStep >= searchRangeRoot * p.voxelStep)
break; // search range exhausted
iHigh++; iLow--;
}
if (hit_vw && hit_rw)
{
if (sign == -1)
{
if (gaussLow) *gaussLow = Gauss( x_vw, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2 ); //p.Gauss(x_vw, iProfil);
if (gaussHigh) *gaussHigh = Gauss( x_rw, p, profile_16U, filterBuffer, tempVector, dynSigma , shoch2 ); ////p.Gauss(x_rw, iProfil);
}
else
{
if (gaussLow) *gaussLow = Gauss(x_rw, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); // //p.Gauss(x_rw, iProfil);
if (gaussHigh) *gaussHigh = Gauss(x_vw, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); // //p.Gauss(x_vw, iProfil);
}
return x_vw - x_rw; // the difference between the inflection points is the characteristic we are looking for
}
else
{
if (gaussLow) *gaussLow = 0;
if (gaussHigh) *gaussHigh = 0;
return 0.0;
}
}
__device__ bool SearchAroundZero(CCTProfilsEvaluationSP_Device& p, float& x, int iProfil, float fSearchRange, float fSearchRangeNeg, float staticTest,
float airPointsThresh, bool dynControl, int sign)
{
//std::cout << "range factor : " << p.rangeFactor << std::endl;
bool result = true;
//assert(p.threshold > 0.0);
//assert(p.tempConvLength > 2 * p.coeffLength + p.searchRange + p.searchRangeNeg);
//assert(p.dynSigma > 0.3);
p.PutDynSigma(p.sigma); // reset for every point!
// Possibly reset the dynamic threshold
if (!p.dynThresholdControl)
p.dynThreshold = p.threshold;
if (p.dynThreshold > p.threshold)
p.dynThreshold = p.threshold;
p.resQuality[iProfil] = -1.0;
float x_vw = 0.0, x_rw = 0.0; // hits of the forward and backward search
// Use an existing result
if (p.ptValid[iProfil] != true || p.results[iProfil] > 1e6)
{
p.ptValid[iProfil] = false;
//Fold second derivative over entire search area
p.convProfileZeroIndex = p.FoldSecDerOpt(iProfil, p.tempConvProfile);
int i_vw = p.convProfileZeroIndex, i_rw = p.convProfileZeroIndex; //Index of forward and backward search
bool hit_vw = false, hit_rw = false; //Threshold hit indicators
//std::cout << "convolution profile : " << p.tempConvProfile[100] << " " << p.tempConvProfile[150] << " " << p.tempConvProfile[250] << std::endl;
//printf("convolution profile gpu : %f %f %f \n : ", p.tempConvProfile[100], p.tempConvProfile[150], p.tempConvProfile[250]);
//Loop with simultaneous forward and backward search
while (1)
{
// Test search range and sign change of the 2nd derivative.
// We test up to the successor of i_vw; if there is no sign change there, there is no zero crossing -
// at whole coordinates the Opt folding is exact!
if (i_vw - p.convProfileZeroIndex < p.searchRange - 1 &&
sign * p.tempConvProfile[i_vw + 1] > 0 &&
sign * p.tempConvProfile[i_vw] < 0)
{
//Interpolation hits forward
x_vw = (i_vw + p.tempConvProfile[i_vw] / (p.tempConvProfile[i_vw] - p.tempConvProfile[i_vw + 1]) - p.convProfileZeroIndex) * p.voxelStep;
//printf(" canny vw : %f ", p.Canny(x_vw, iProfil));
if (sign * p.Canny(x_vw, iProfil) > sign * p.dynThreshold) // threshold criterion
{
if (!hit_vw && !hit_rw)
{
hit_vw = true;
x = x_vw;
}
else
p.resQuality[iProfil] = 50.0;
}
}
//Test search range and sign change of the 2nd derivative
if (p.convProfileZeroIndex - i_rw < p.searchRangeNeg - 1 && sign * p.tempConvProfile[i_rw] > 0 && sign * p.tempConvProfile[i_rw - 1] < 0)
{
//Interpolation hits backwards
x_rw = (i_rw - p.tempConvProfile[i_rw] / (p.tempConvProfile[i_rw] - p.tempConvProfile[i_rw - 1]) - p.convProfileZeroIndex) * p.voxelStep;
//printf(" canny : %f ", p.Canny(x_rw, iProfil));
if (sign * p.Canny(x_rw, iProfil) > sign * p.dynThreshold) //threshold criterion
if (!hit_rw && !hit_vw)
{
hit_rw = true;
x = x_rw;
}
else if (hit_vw && !hit_rw)
{
hit_rw = true;
x = (x < -x_rw) ? x : x_rw;
}
else p.resQuality[iProfil] = 50.0;
}
if (!dynControl && (hit_vw || hit_rw))
break; //Landed hits
i_vw++; i_rw--;
if (i_vw - p.convProfileZeroIndex > p.searchRange && p.convProfileZeroIndex - i_rw > p.searchRangeNeg)
break; //Search area browsed
}
if (!hit_vw && !hit_rw)
result = false;
printf("\n hit found : %f %d %d %d \n", x_vw, hit_vw, hit_rw, result);
printf("dynamic threshold %f %d %d %d %f \n", p.dynThreshold, sign, p.convProfileZeroIndex, p.searchRangeNeg , p.voxelStep);
}
else x = p.results[iProfil];
if (result && dynControl)
result = NewtonMax( p , x, iProfil); // point located precisely?? Result in x!!!
printf("\n newton max : %f %d %f %f\n", x, result , p.dynSigma , p.shoch2);
if (result)
if (-x > fSearchRangeNeg || x > fSearchRange)
result = false;
while (result) // precise determination of the zero crossing succeeded
{
bool dynCorr = false;
if (dynControl)
dynCorr = SetDynSigma( p , x, iProfil);
if (dynCorr)
{
result = NewtonMax( p , x, iProfil);
p.dynThreshold = p.dynSigma / p.sigma*p.threshold; // also adapt the threshold, heuristically...
if (!result)
break;
}
p.resCanny[iProfil] = p.Canny(x, iProfil);
if ((sign*p.resCanny[iProfil] < sign*p.dynThreshold) // gradient threshold exceeded?
|| (x > fSearchRange)
|| (x < -fSearchRangeNeg))
{
result = false;
break;
}
float actGradLength = 0;
bool staticChecked = false;
// check against the static threshold
if (dynControl)
{
float high, low;
// compute the gradient step length and its end points
actGradLength = GradientLength( p , x, iProfil, &low, &high, p.resCanny[iProfil]);
if (low > 0 && high > 0)
staticChecked = true;
if (staticChecked && staticTest > 0)
{
if (staticTest > high || staticTest < low)
{
result = false;
break;
}
}
}
// if the gradient length computation fails or dynControl is off (nominal/actual comparison)
if (!staticChecked && staticTest > 0)
{
float lowValue = p.Gauss(x - 2 * p.sigma, iProfil);
float highValue = p.Gauss(x + 2 * p.sigma, iProfil);
if (lowValue > staticTest || highValue < staticTest)
{
result = false;
break;
}
}
// air point test
if (airPointsThresh > 0)
{
float grayActual = p.Gauss(x, iProfil);
if (grayActual < airPointsThresh)
{
result = false;
break;
}
}
// set the dynamic p.threshold to 75% of this point's maximum
if (p.dynThresholdControl)
p.dynThreshold = (float)fabs(p.Canny(x, iProfil)) * 3 / 4;
// but not larger than the preset threshold
if (p.dynThreshold > p.threshold)
p.dynThreshold = p.threshold;
p.ptValid[iProfil] = true;
if (dynControl)
{
if (p.resQuality[iProfil] < 0)
p.resQuality[iProfil] = 0.0;
if (p.resCanny[iProfil] < 2 * p.threshold)
p.resQuality[iProfil] += 25 * (2 * p.threshold - p.resCanny[iProfil]) / p.threshold;
actGradLength = __min(actGradLength, 4.0f * p.dynSigma);
if (actGradLength > 2 * p.dynSigma)
p.resQuality[iProfil] += 12 * (actGradLength - 2 * p.dynSigma) / p.dynSigma;
}
p.results[iProfil] = x;
break;
}
if (!result)
p.ptValid[iProfil] = false;
return result;
}
__device__ bool SearchAroundZero( const ProfileEvaluationConstants& p, unsigned short* profile_16U , float* tempConvProfile, float* filterBuffer , float* tempVector , float& x, int iProfil, float fSearchRange, float fSearchRangeNeg, float staticTest,
float airPointsThresh, bool dynControl, int sign , bool& ptValid , float& resCanny, float& resQuality , float& result, float& dynSigma, float& shoch2)
{
//std::cout << "range factor : " << p.rangeFactor << std::endl;
bool isValid = true;
//assert(p.threshold > 0.0);
//assert(p.tempConvLength > 2 * p.coeffLength + p.searchRange + p.searchRangeNeg);
//assert(p.dynSigma > 0.3);
float dynThreshold = p.dynThreshold1;
//p.PutDynSigma(p.sigma); // TODO can be done a priori
PutDynSigma(p, p.sigma, dynSigma, shoch2);
//
// // reset the dynamic threshold if necessary
if (!p.dynThresholdControl) // TODO can be done a priori
dynThreshold = p.threshold;
if (dynThreshold > p.threshold) // TODO can be done a priori
dynThreshold = p.threshold;
resQuality = -1.0; // should be a parameter
float x_vw = 0.0, x_rw = 0.0; // hits of the forward and backward search
// reuse an existing result if available
if ( ptValid != true || result > 1e6) // should be function parameter
{
ptValid = false;
//Fold second derivative over entire search area
int convProfileZeroIndex = FoldSecDerOpt( p , profile_16U , tempConvProfile );
int i_vw = convProfileZeroIndex, i_rw = convProfileZeroIndex; //Index of forward and backward search
bool hit_vw = false, hit_rw = false; //Threshold hit indicators
//std::cout << "convolution profile : " << p.tempConvProfile[100] << " " << p.tempConvProfile[150] << " " << p.tempConvProfile[250] << std::endl;
//printf("convolution profile gpu : %f %f %f \n : ", p.tempConvProfile[100], p.tempConvProfile[150], p.tempConvProfile[250]);
//Loop with equal forward and reverse sweep
while (1)
{
// Test search range and sign change of the 2nd derivative.
// The test runs up to the successor of i_vw; if there is no sign change,
// there is no zero crossing - at integer coordinates the optimized convolution is exact!
if ( i_vw - convProfileZeroIndex < p.searchRange - 1 &&
sign * tempConvProfile[i_vw + 1] > 0 &&
sign * tempConvProfile[i_vw] < 0)
{
//Interpolation hits forward
x_vw = (i_vw + tempConvProfile[i_vw] / ( tempConvProfile[i_vw] - tempConvProfile[i_vw + 1]) - convProfileZeroIndex) * p.voxelStep;
//printf(" canny vw : %f ", p.Canny(x_vw, iProfil));
//if (sign * p.Canny(x_vw, iProfil) > sign * p.dynThreshold) // threshold criterion
if( sign * Canny( x_vw , p , profile_16U , filterBuffer , tempVector , dynSigma , shoch2) )
{
if (!hit_vw && !hit_rw)
{
hit_vw = true;
x = x_vw;
}
else
resQuality = 50.0;
}
}
//Test search range and sign change 2.Abl.
if ( convProfileZeroIndex - i_rw < p.searchRangeNeg - 1 && sign * tempConvProfile[i_rw] > 0 && sign * tempConvProfile[i_rw - 1] < 0)
{
//Interpolation hits backwards
x_rw = (i_rw - tempConvProfile[i_rw] / ( tempConvProfile[i_rw] - tempConvProfile[i_rw - 1]) - convProfileZeroIndex) * p.voxelStep;
//printf(" canny : %f ", p.Canny(x_rw, iProfil));
if (sign * Canny( x_rw, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2) > sign * dynThreshold) //threshold criterion
if (!hit_rw && !hit_vw)
{
hit_rw = true;
x = x_rw;
}
else if (hit_vw && !hit_rw)
{
hit_rw = true;
x = (x < -x_rw) ? x : x_rw;
}
else
resQuality = 50.0;
}
if (!dynControl && (hit_vw || hit_rw))
break; //Landed hits
i_vw++; i_rw--;
if (i_vw - convProfileZeroIndex > p.searchRange && convProfileZeroIndex - i_rw > p.searchRangeNeg)
break; //Search area browsed
}
if (!hit_vw && !hit_rw)
isValid = false;
//printf("\n hit found : %f %d %d %d \n", x_vw, hit_vw, hit_rw, isValid);
//printf("dynamic threshold %f %d %d %d %f \n", dynThreshold, sign, convProfileZeroIndex, p.searchRangeNeg , p.voxelStep);
}
else x = result;
if (isValid && dynControl)
isValid = NewtonMax( p , x , profile_16U, filterBuffer , tempVector , dynSigma , shoch2 ); //NewtonMax(p, x, iProfil); // point located precisely?? Result in x!!!
if (isValid)
{
result = x;
}
return isValid;
#if 0
//printf("\n newton max : %f %d %f %f \n", x , isValid , dynSigma , shoch2);
if (isValid)
if (-x > fSearchRangeNeg || x > fSearchRange)
isValid = false;
while (isValid) // precise determination of the zero crossing succeeded
{
bool dynCorr = false;
if (dynControl)
dynCorr = SetDynSigma(p, x, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //SetDynSigma(p, x, iProfil);
if (dynCorr)
{
isValid = NewtonMax(p, x, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //NewtonMax(p, x, iProfil);
dynThreshold = dynSigma / p.sigma * p.threshold; // also adapt the threshold, heuristically...
if (!isValid)
break;
}
resCanny = Canny(x, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //p.Canny(x, iProfil);
if ((sign * resCanny < sign * dynThreshold) // gradient threshold exceeded?
|| (x > fSearchRange)
|| (x < -fSearchRangeNeg))
{
isValid = false;
break;
}
float actGradLength = 0;
bool staticChecked = false;
// check against the static threshold
if (dynControl)
{
float high, low;
// compute the gradient step length and its end points
actGradLength = GradientLength( p, x, profile_16U, tempConvProfile, filterBuffer, tempVector, ptValid, resCanny, &low, &high, resCanny , dynSigma , shoch2 ); //GradientLength(p, x, iProfil, &low, &high, p.resCanny[iProfil]);
if (low > 0 && high > 0)
staticChecked = true;
if (staticChecked && staticTest > 0)
{
if (staticTest > high || staticTest < low)
{
isValid = false;
break;
}
}
}
// if the gradient length computation fails or dynControl is off (nominal/actual comparison)
if (!staticChecked && staticTest > 0)
{
float lowValue = Gauss(x - 2 * p.sigma, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //p.Gauss(x - 2 * p.sigma, iProfil);
float highValue = Gauss(x + 2 * p.sigma, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //p.Gauss(x + 2 * p.sigma, iProfil);
if (lowValue > staticTest || highValue < staticTest)
{
isValid = false;
break;
}
}
// air point test
if (airPointsThresh > 0)
{
float grayActual = Gauss( x , p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); // //p.Gauss(x, iProfil);
if (grayActual < airPointsThresh)
{
isValid = false;
break;
}
}
// set the dynamic p.threshold to 75% of this point's maximum
if (p.dynThresholdControl)
dynThreshold = (float)fabs(Canny(x, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2)) * 3 / 4; //(float)fabs(p.Canny(x, iProfil)) * 3 / 4;
// but not larger than the preset threshold
if (dynThreshold > p.threshold)
dynThreshold = p.threshold;
ptValid = true;
if (dynControl)
{
if ( resQuality < 0)
resQuality = 0.0;
if ( resCanny < 2 * p.threshold)
resQuality += 25 * (2 * p.threshold - resCanny ) / p.threshold;
actGradLength = __min(actGradLength, 4.0f * dynSigma);
if (actGradLength > 2 * dynSigma)
resQuality += 12 * (actGradLength - 2 * dynSigma) / dynSigma;
}
result = x;
break;
}
if ( !isValid )
ptValid = false;
return isValid;
#endif
}
__global__ void profileGradientMaxima_Kernel( unsigned short* prof, float* kernelData ,
int profileSize , int kernelSize , int numProfiles , float fSearchRange , float fSearchRangeNeg , int wB, float* results , bool* ptValid )
{
int y = threadIdx.y + blockIdx.y * blockDim.y;
int profileId = (y * wB + blockIdx.x) * blockDim.x + threadIdx.x;
if (profileId >= numProfiles)
return;
extern __shared__ float sharedKernelMemory[144];
extern __shared__ ProfileEvaluationConstants profileEvalParams;
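// Shared-memory layout (as sized by the host launcher below): per thread the dynamic shared memory
// holds one compacted profile (profileSize unsigned shorts), one convolution buffer (tempConvLength
// floats) and one filter/temp buffer (2 * kernelSize floats), plus extra room intended for the
// three 48-entry filter kernels and the profile-evaluation constants. Note that in CUDA all
// extern __shared__ declarations refer to the same base address of that dynamic allocation.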
//printf("%d \n", sizeof(ProfileEvaluationConstants));
int* profBuff = (int*)&profileEvalParams;
if (threadIdx.x < 16)
{
memcpy(sharedKernelMemory + 2 * threadIdx.x , constGaussKernelData.data + 2 * threadIdx.x , 2 * sizeof(float));
memcpy(sharedKernelMemory + 48 + 2 * threadIdx.x, constCannyKernelData.data + 2 * threadIdx.x, 2 * sizeof(float));
memcpy(sharedKernelMemory + 2 * 48 + 2 * threadIdx.x, constSecDerKernelData.data + 2 * threadIdx.x, 2 * sizeof(float));
}
else
{
memcpy(sharedKernelMemory + 16 + threadIdx.x, constGaussKernelData.data + 16 + threadIdx.x, sizeof(float));
memcpy(sharedKernelMemory + 48 + 16 + threadIdx.x, constCannyKernelData.data + 16 + threadIdx.x, sizeof(float));
memcpy(sharedKernelMemory + 2 * 48 + 16 + threadIdx.x, constSecDerKernelData.data + 16 + threadIdx.x, sizeof(float));
}
memcpy(profBuff + threadIdx.x * 2, profileEvaluatorData.data + 8 * threadIdx.x, 8);//copy 8 byte per threads
__syncthreads();
//printf(" %d \n", profileEvalParams.length);
//CCTProfilsEvaluationSP_Device profileEvaluation;
//auto ped = (unsigned char*)&profileEvaluation;
//int evalSize = sizeof(CCTProfilsEvaluationSP_Device);
//memcpy( ped , profileEvaluatorData.data , evalSize );
extern __shared__ unsigned short profileData[];
unsigned short* currentProfile = profileData + threadIdx.x * profileSize;
//printf(" profile size : %d \n", profileSize);
memcpy( currentProfile , prof + profileId * profileSize, profileSize * sizeof(unsigned short));
float* tempConvolutionData = (float*)( profileData + blockDim.x * profileSize );
float *kernelDataShared = (float*)( tempConvolutionData + blockDim.x * profileEvalParams.tempConvLength );
float* currentConvolutionData = tempConvolutionData + threadIdx.x * profileEvalParams.tempConvLength;
float resQuality, resCanny , result;
bool ptValidLocal;
//profileEvaluation.resQuality = &resQuality;
//profileEvaluation.resCanny = &resCanny;
//profileEvaluation.ptValid = &ptValidLocal;
//profileEvaluation.results = &result;
//profileEvaluation.ptValid[0] = false;
//profileEvaluation.results[0] = 0;
//profileEvaluation.tempConvProfile = currentConvolutionData;
//profileEvaluation.gaussCoeffs = sharedKernelMemory;
//profileEvaluation.cannyCoeffs = sharedKernelMemory + 48;
//profileEvaluation.secDerCoeffs = sharedKernelMemory + 2 * 48;
//profileEvaluation.filterCoeffs = kernelDataShared + threadIdx.x * kernelSize * 2;
//profileEvaluation.tempVector = profileEvaluation.filterCoeffs + kernelSize;
//profileEvaluation.profile_16U = currentProfile;
float xx = 0;
float* filterCoeffs = kernelDataShared + threadIdx.x * kernelSize * 2;
float* tempVector = filterCoeffs + kernelSize;
//ptValid[profileId] = SearchAroundZero( profileEvaluation , xx, 0, fSearchRange, fSearchRangeNeg, -1, -1, true, true);
float dynSigma = profileEvalParams.dynSigma1, shoch2 = profileEvalParams.shoch21;
profileEvalParams.gaussCoeffs = sharedKernelMemory;
profileEvalParams.cannyCoeffs = sharedKernelMemory + 48;
profileEvalParams.secDerCoeffs = sharedKernelMemory + 2 * 48;
//if (threadIdx.x == 0)
//{
// ptValid[profileId] = SearchAroundZero(profileEvaluation, xx, 0, fSearchRange, fSearchRangeNeg, -1, -1, true, true);
// printf("value of xx1 : %f %d \n", xx, ptValid[profileId]);
//float xx = 0;
result = 0;
ptValidLocal = false;
ptValid[profileId] = SearchAroundZero(profileEvalParams, currentProfile, currentConvolutionData, filterCoeffs, tempVector, xx, 0, fSearchRange,
fSearchRangeNeg, -1, -1, true, true, ptValidLocal, resCanny, resQuality, result, dynSigma, shoch2);
results[profileId] = xx;
//printf("value of xx2 : %f %d \n", xx, ptValid[profileId]);
//}
}
__global__ void Simple_Kernel()
{
printf("simple kernel \n");
}
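// Host-side driver (sketch of what follows): copy the serialized evaluator constants and the three
// precomputed filter kernels into __constant__ memory, compact every profile to the window actually
// needed for the search (2*coeffLength + searchRange + searchRangeNeg + 1 samples starting at
// zeroIndex - searchRangeNeg - coeffLength), upload the compacted profiles, and launch
// profileGradientMaxima_Kernel with 32 threads per block (one profile per thread) and dynamic
// shared memory sized for the per-thread buffers.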
void computeGradientBasedMaximaPoints( void* cpuProfiles , unsigned short* cpuProfileData , float* gaussKernelData ,
float* cannyKernelData , float *secDerKernelData , int numProfiles, int profileLength ,
int tempConvLength , int filterKernelSize , int singleProfileEvaluatorSize ,
int coeffLength , int searchRangeNeg , int zeroIndex , int searchRange )
{
//global memory for storing the profiles
unsigned short* profileMemoryDevice = 0;
//printf(" single profile evaluator size : %d \n", singleProfileEvaluatorSize);
//printf("gaussKernelData : %f %f %f %f %f \n", gaussKernelData[0], gaussKernelData[3], gaussKernelData[7], gaussKernelData[17], gaussKernelData[31]);
//printf("cannyKernelData : %f %f %f %f %f \n", cannyKernelData[0], cannyKernelData[3], cannyKernelData[7], cannyKernelData[17], cannyKernelData[31]);
//printf("secDerCoeffs : %f %f %f %f %f \n", secDerKernelData[0], secDerKernelData[3], secDerKernelData[7], secDerKernelData[17], secDerKernelData[31]);
//printf(" single profile evaluator size %d ", singleProfileEvaluatorSize);
HANDLE_ERROR(hipMemcpyToSymbol( profileEvaluatorData, cpuProfiles, singleProfileEvaluatorSize, 0 , hipMemcpyHostToDevice ));
HANDLE_ERROR(hipMemcpyToSymbol( constGaussKernelData, gaussKernelData, filterKernelSize * sizeof(float), 0, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpyToSymbol( constCannyKernelData, cannyKernelData, filterKernelSize * sizeof(float), 0, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpyToSymbol( constSecDerKernelData, secDerKernelData, filterKernelSize * sizeof(float), 0, hipMemcpyHostToDevice));
int shift = zeroIndex - searchRangeNeg - coeffLength;
int validLen = (2 * coeffLength + searchRange + searchRangeNeg + 1);
unsigned short* validProfileData = new unsigned short[ validLen * numProfiles ];
for ( int ii = 0; ii < numProfiles; ii++ )
{
memcpy(validProfileData + validLen * ii, cpuProfileData + ii * profileLength + shift , sizeof(unsigned short) * validLen);
}
//hipMalloc((void**)&profileMemoryDevice, numProfiles * profileLength * sizeof(unsigned short));
hipMalloc((void**)&profileMemoryDevice, numProfiles * validLen * sizeof(unsigned short));
//hipMemcpy(profileMemoryDevice, cpuProfiles, singleProfileSize * numProfiles, hipMemcpyHostToDevice);
//hipMemcpy(profileMemoryDevice, cpuProfileData, numProfiles * profileLength * sizeof(unsigned short), hipMemcpyHostToDevice);
hipMemcpy( profileMemoryDevice, validProfileData, numProfiles * validLen * sizeof(unsigned short), hipMemcpyHostToDevice);
int groupSize = 32;
dim3 threads(groupSize, 1);
float* resultsGPU;
bool* ptValidGPU;
hipMalloc( (void**)&resultsGPU, numProfiles * sizeof(float));
hipMalloc( (void**)&ptValidGPU, numProfiles * sizeof(bool));
int wB = 1024;//1;//
int nProfileSets = numProfiles / groupSize;
int nXBatches = 1;
if ( nProfileSets > wB)
{
nXBatches = nProfileSets % wB == 0 ? nProfileSets / wB : nProfileSets / wB + 1;
}
dim3 blocks(wB , nXBatches );//nXBatches
//tempConvLength = ::max(tempConvLength, 474);
printf("temp convolution length %d : \n", tempConvLength);
int sharedMemorySize = ( ( validLen * sizeof(unsigned short) + tempConvLength * sizeof(float) + 2 * filterKernelSize * sizeof(float)) * groupSize ) + 48 * 3 + 256 ;
printf("shared memory size %d \n ", sharedMemorySize);
//we need all the shared memory for computation
float* variableKernelData;
printf("number of blocks %d \n ", wB * nXBatches);
//Simple_Kernel << <1, 1 >> > ();
int* profileCountGPU;
hipMalloc( (void**)&profileCountGPU, sizeof(int));
hipMemset(profileCountGPU, 0, sizeof(int));
hipLaunchKernelGGL(( profileGradientMaxima_Kernel) , dim3(blocks), dim3(threads), sharedMemorySize , 0, profileMemoryDevice , variableKernelData, validLen ,
filterKernelSize, numProfiles, 40, 40, wB, resultsGPU, ptValidGPU);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
printf("cuda kernel failure\n");
}
else
{
printf("kernel executed successfully \n");
}
HANDLE_ERROR(error);
//HANDLE_ERROR(hipDeviceSynchronize());
int profileCountCPU = 0;
hipMemcpy(&profileCountCPU, profileCountGPU, sizeof(int), hipMemcpyDeviceToHost);
printf("profile count gpu %d , actual number of profiles : %d : ", profileCountCPU, numProfiles);
}
}
}
} | 3fa5b897022a88b44b726543724e2d89b5806db3.cu | #define CUDA_LAUNCH_BLOCKING 1
#include "ctprofileevaluation.cuh"
#include <algorithm>
#include "stdio.h"
namespace imt
{
namespace volume
{
namespace cuda
{
static void HandleError(cudaError_t err,
const char *file,
int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
struct ProfileEvaluatorData {
char data[256];
};
struct KernelData
{
float data[48];
};
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//We copy the single profile evaluator into a char array and deserialize it into the device-side profile evaluator.
//Since all of its parameters stay constant for a profile evaluator, the threads do not need to re-initialize it and can simply copy it.
__device__ __constant__ ProfileEvaluatorData profileEvaluatorData; //char profileEvaluatorData[256];
__device__ __constant__ KernelData constGaussKernelData , constCannyKernelData , constSecDerKernelData;
//these kernels stay constant during gradient based extrema estimation
__device__ __constant__ float gaussPreEvaluatedKernel[43], cannyPreEvaluatedKernel[43], secDerPreEvaluatedKernel[43];
__device__ void ippsConvert_16u32f(unsigned short* pSrc, float* pDst, int len)
{
for (int ll = 0; ll < len; ll++)
{
pDst[ll] = pSrc[ll];
}
}
__device__ void ippsSet_16s(short value, short* arr, int len)
{
for (int ll = 0; ll < len; ll++)
{
arr[ll] = value;
}
}
__device__ void ippsNorm_L2_32f(float* arr, int len, float* norm)
{
*norm = 0;
for (int ll = 0; ll < len; ll++)
{
*norm += arr[ll] * arr[ll];
}
*norm = sqrtf(*norm);
}
__device__ void ippsSqr_32f_I(float* coeffs, int length)
{
for (int ii = 0; ii < length; ii++)
{
coeffs[ii] = coeffs[ii] * coeffs[ii];
}
}
__device__ void ippsDivC_32f_I(float denom, float* arr, int length)
{
float invDenom = 1.0f / denom;
for (int ii = 0; ii < length; ii++)
{
arr[ii] *= invDenom; ///= denom; //can use fast inbuilt division function
}
}
__device__ void ippsExp_32f_I(float* arr, int length)
{
for (int ii = 0; ii < length; ii++)
{
arr[ii] = expf(arr[ii]);
}
}
__device__ void ippsCopy_32f(float *src, float* dst, int len)
{
memcpy(dst, src, len * sizeof(float));
//for (int ll = 0; ll < len; ll++)
//{
// dst[ll] = src[ll];
//}
}
__device__ void ippsCopy_32f(unsigned short *src, float* dst, int len)
{
for (int ii = 0; ii < len; ii++)
{
dst[ii] = src[ii];
}
//memcpy(dst, src, len * sizeof(float));
}
__device__ void ippsMul_32f_I(const float* pSrc, float* pSrcDst, int len)
{
for (int ii = 0; ii < len; ii++)
{
pSrcDst[ii] = pSrcDst[ii] * pSrc[ii];
}
}
__device__ void ippsAddC_32f_I(float val, float *srcDst, int length)
{
for (int ll = 0; ll < length; ll++)
{
srcDst[ll] += val;
}
}
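// The fill*Coeffs helpers below overwrite a buffer of sample offsets x with a filter kernel
// evaluated at those offsets (shoch2 is sigma^2): fillGaussCoeffsCUDA yields the unnormalized
// Gaussian exp(-x^2/(2*sigma^2)), fillCannyCoeffsCUDA its first derivative
// -x/sigma^2 * exp(-x^2/(2*sigma^2)), and fillSecDerCoeffsCUDA its second derivative
// (x^2/sigma^2 - 1)/sigma^2 * exp(-x^2/(2*sigma^2)).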
__device__ int fillGaussCoeffsCUDA(float* gaussCoeffs, float shoch2, int length, float* tempVector)
{
ippsSqr_32f_I(gaussCoeffs, length);
ippsDivC_32f_I(-2.0f * shoch2, gaussCoeffs, length);
ippsExp_32f_I(gaussCoeffs, length);
return 0;
}
__device__ int fillCoeffsCannyCUDA(float* gaussCoeffs, float shoch2, int length, float* tempVector)
{
ippsSqr_32f_I(gaussCoeffs, length);
ippsDivC_32f_I(-2.0f * shoch2, gaussCoeffs, length);
ippsExp_32f_I(gaussCoeffs, length);
return 0;
}
__device__ int fillCannyCoeffsCUDA(float* cannyCoeffs, float shoch2, int length, float* t)
{
ippsCopy_32f(cannyCoeffs, t, length);
ippsSqr_32f_I(cannyCoeffs, length);
ippsDivC_32f_I(-2.0f*shoch2, cannyCoeffs, length);
ippsExp_32f_I(cannyCoeffs, length);
ippsDivC_32f_I(-shoch2, cannyCoeffs, length);
ippsMul_32f_I(t, cannyCoeffs, length);
return 0;
}
__device__ int fillSecDerCoeffsCUDA(float* secDerCoeffs, float shoch2, int length, float* t)
{
/*if (!t)
{
throw "Memory allocation failed";
}*/
ippsSqr_32f_I(secDerCoeffs, length);
ippsDivC_32f_I(-2.0f*shoch2, secDerCoeffs, length);
ippsCopy_32f(secDerCoeffs, t, length);
ippsExp_32f_I(secDerCoeffs, length);
ippsAddC_32f_I(0.5f, t, length);
ippsMul_32f_I(t, secDerCoeffs, length);
ippsDivC_32f_I(-0.5f*shoch2, secDerCoeffs, length);
return 0;
}
__device__ void ippsDotProd_32f(float* src1, float* src2, int len, float* result)
{
for (int ll = 0; ll < len; ll++)
{
*result += src1[ll] * src2[ll];
}
}
__device__ void ippsDotProd_32f(unsigned short* src1, float* src2, int len, float* result)
{
for (int ll = 0; ll < len; ll++)
{
*result += src1[ll] * src2[ll];
}
}
__device__ void ippsDotProd_32f(unsigned short* src1, unsigned short* src2, int len, float* result)
{
for (int ll = 0; ll < len; ll++)
{
*result += src1[ll] * src2[ll];
}
}
__device__ void ippsSub_32f_I(float* pSrc, float* pSrcDst, int length)
{
for (int ll = 0; ll < length; ll++)
{
pSrcDst[ll] -= pSrc[ll];
}
}
__device__ void ippsSub_32f_I(unsigned short* pSrc, float* pSrcDst, int length)
{
for (int ll = 0; ll < length; ll++)
{
pSrcDst[ll] -= pSrc[ll];
}
}
__device__ void ippsConv_32f(const float* pSrc1, int src1Len, const float* pSrc2, int src2Len, float* pDst)
{
int dstLen = src1Len + src2Len - 1;
for (int ll = 0; ll < dstLen; ll++)
{
float conv = 0;
int start = __max(0, ll - src2Len + 1);
int end = __min(ll, src1Len - 1);
for (int kk = start; kk <= end; kk++)
{
//int p = ll - kk;
conv += pSrc1[kk] * pSrc2[ll - kk];
}
pDst[ll] = conv;
}
}
__device__ void ippsConv_32f(const unsigned short* pSrc1, int src1Len, const float* pSrc2, int src2Len, float* pDst)
{
int dstLen = src1Len + src2Len - 1;
for (int ll = 0; ll < dstLen; ll++)
{
float conv = 0;
int start = __max(0, ll - src2Len + 1);
int end = __min(ll, src1Len - 1);
for (int kk = start; kk <= end; kk++)
{
//int p = ll - kk;
conv += pSrc1[kk] * pSrc2[ll - kk];
}
pDst[ll] = conv;
}
}
//pSrcDst[n] = pSrcDst[n] + pSrc[n]*val, 0 ≤ n < len
__device__ void ippsAddProductC_32f(const float* pSrc, const float val, float* pSrcDst, int len)
{
for (int ll = 0; ll < len; ll++)
{
pSrcDst[ll] += val * pSrc[ll];
}
}
__device__ void ippsMulC_32f_I(float val, float* pSrcDst, int length)
{
for (int ll = 0; ll < length; ll++)
{
pSrcDst[ll] *= val;
}
}
__device__ CCTProfilsEvaluationSP_Device::CCTProfilsEvaluationSP_Device()
{
voxelStep = 0.25;
//profile = NULL;
memoryAllocated = false;
length = 0;
nProfils = 0;
zeroIndex = 0;
gaussCoeffs = 0;
cannyCoeffs = 0;
secDerCoeffs = 0;
filterCoeffs = 0;
tempVector = 0;
sigma = 0.0;
threshold = 0.0;
voxelType = Void;
searchRange = 20;
searchRangeNeg = 0;
tempConvLength = 0;
tempConvProfile = 0;
results = NULL;
resCanny = NULL;
resQuality = NULL;
ptValid = NULL;
rangeFactor = 3.5;
nValid = 0;
}
// __device__ void CCTProfilsEvaluationSP_Device::Init()
//{
// //assert(sigma > 0.4);
// dynSigma = sigma;
// shoch2 = dynSigma * dynSigma;
// gaussCoeffs = 0;
// cannyCoeffs = 0;
// secDerCoeffs = 0;
// filterCoeffs = 0;
// tempVector = 0;
// searchRangeNeg = searchRange;
// dynThresholdControl = false;
// dynThreshold = threshold;
// tempConvProfile = 0;
// tempConvLength = 0;
// coeffLength = 0;
// PreCalc();
// firstValid = -1;
// lastValid = -1;
// results = NULL;
// resCanny = NULL;
// resQuality = NULL;
// ptValid = NULL;
// nAngle = 0;
// rangeFactor = 3.5;
// nValid = 0;
//}
__device__ CCTProfilsEvaluationSP_Device::~CCTProfilsEvaluationSP_Device(void)
{
//delete[] gaussCoeffs;
//delete[] cannyCoeffs;
//delete[] secDerCoeffs;
//delete[] filterCoeffs;
//delete[] tempVector;
////ZiTrace("del tempConvProfile Destruktor: %x alte Länge: %d\n",tempConvProfile,tempConvLength);
//delete[] tempConvProfile;
//if (memoryAllocated) delete[] profile;
//delete[] results;
//delete[] resCanny;
//delete[] resQuality;
//delete[] ptValid;
}
// set the negative search range independently of the positive one
//__device__ void CCTProfilsEvaluationSP_Device::SetSearchRangeNeg(float srNeg)
// {
// if (srNeg == 0.0)
// {
// searchRangeNeg = searchRange;
// }
// else
// {
// searchRangeNeg = (int)ceil(srNeg / voxelStep);
// }
// }
// // set the search range
//__device__ void CCTProfilsEvaluationSP_Device::SetSearchRange(float sr)
// {
// searchRange = (int)ceil(sr / voxelStep);
// }
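// Derivatives() below is the generic filtered-derivative evaluator: the callback fills the filter
// coefficients with the chosen kernel (Gaussian, or its 1st/2nd derivative) sampled at half-voxel
// offsets around x, and the coefficients are dot-multiplied with the profile difference
// profile[i] - profile[i+1]; the result is therefore one derivative order higher than the kernel
// itself (Canny, SecondDer and ThirdDer select the corresponding callback).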
__device__ float Derivatives(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U,
float* filterCoeffs, float* tempVector , const float& dynSigma , const float& shoch2, int(*callback)(float*, float, int, float*))
{
//assert(sigma > 0.0);
int actFilterLength = int(p.rangeFactor * dynSigma / p.voxelStep);
//std::cout << "act filter length : " << actFilterLength<<" "<<dynSigma << std::endl;
//assert(actFilterLength <= coeffLength);
int filterIndex = int(floor(x / p.voxelStep)) + p.zeroIndex - actFilterLength; // index of the start of the filter mask
//assert(filterIndex >= 0 && filterIndex + 2 * actFilterLength + 1 < length);
filterCoeffs[0] = (float)((filterIndex - p.zeroIndex + 0.5) * p.voxelStep - x);
for (int ii = 1; ii < 2 * actFilterLength + 1; ii++)
{
filterCoeffs[ii] = filterCoeffs[ii - 1] + (float)p.voxelStep;
//printf("%f ", filterCoeffs[ii]);
}
callback(filterCoeffs, shoch2, 2 * actFilterLength, tempVector);
auto dat = profile_16U + filterIndex;
ippsCopy_32f(profile_16U + filterIndex, tempVector, 2 * actFilterLength + 1);
ippsSub_32f_I(profile_16U + filterIndex + 1, tempVector, 2 * actFilterLength + 1);
float result = 0;
ippsDotProd_32f(tempVector, filterCoeffs, 2 * actFilterLength, &result);
return -result;
}
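// Gauss() below returns the smoothed gray value at x: the profile is dot-multiplied with the
// Gaussian sampled at whole-voxel offsets and the sum is scaled by voxelStep / (dynSigma * sqrt(2*pi)),
// which approximates the normalized Gaussian integral.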
// Gaussian-filtered value
__device__ float CCTProfilsEvaluationSP_Device::Gauss(float x, int iProfil)
{
actFilterLength = int(rangeFactor * dynSigma / voxelStep);
//assert(actFilterLength <= coeffLength);
filterIndex = int(floor(x / voxelStep)) + zeroIndex - actFilterLength; // index of the start of the filter mask
if (x / voxelStep - floor(x / voxelStep) > 0.5)
filterIndex++;
//assert(filterIndex >= 0 && filterIndex + 2 * actFilterLength < length);
filterCoeffs[0] = (float)((filterIndex - zeroIndex) * voxelStep - x);
for (ii = 1; ii < 2 * actFilterLength + 1; ii++)
{
filterCoeffs[ii] = filterCoeffs[ii - 1] + (float)voxelStep;
}
fillGaussCoeffsCUDA(filterCoeffs, shoch2, 2 * actFilterLength + 1, tempVector);
result = 0;
ippsDotProd_32f(profile_16U + iProfil * length + filterIndex, filterCoeffs, 2 * actFilterLength + 1, &result);
return voxelStep * result / dynSigma / sqrtf(2 * M_PI);
}
__device__ float Gauss(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U, float* filterCoeffs, float* tempVector, const float& dynSigma , const float& shoch2 )
{
int actFilterLength = int( p.rangeFactor * dynSigma / p.voxelStep);
//assert(actFilterLength <= coeffLength);
int filterIndex = int(floor(x / p.voxelStep)) + p.zeroIndex - actFilterLength; // index of the start of the filter mask
if (x / p.voxelStep - floor(x / p.voxelStep) > 0.5)
filterIndex++;
//assert(filterIndex >= 0 && filterIndex + 2 * actFilterLength < length);
filterCoeffs[0] = (float)((filterIndex - p.zeroIndex) * p.voxelStep - x);
for ( int ii = 1; ii < 2 * actFilterLength + 1; ii++)
{
filterCoeffs[ii] = filterCoeffs[ii - 1] + (float)p.voxelStep;
}
fillGaussCoeffsCUDA(filterCoeffs, shoch2, 2 * actFilterLength + 1, tempVector);
float result = 0;
ippsDotProd_32f( profile_16U + filterIndex, filterCoeffs, 2 * actFilterLength + 1, &result);
return p.voxelStep * result / dynSigma / sqrtf(2 * M_PI);
}
// first filtered derivative - Canny
__device__ float CCTProfilsEvaluationSP_Device::Canny(float x, int iProfil)
{
//printf("[canny start gpu]\n");
float c = Derivatives(x, iProfil, &fillGaussCoeffsCUDA);
//printf("[Canny output %f]\n", c);
return c;
}
__device__ float Canny(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U, float* filterCoeffs, float* tempVector, const float& dynSigma, const float& shoch2)
{
//printf("[canny start gpu]\n");
float c = Derivatives(x, p, profile_16U, filterCoeffs, tempVector, dynSigma , shoch2 , &fillGaussCoeffsCUDA);
//printf("[Canny output %f]\n", c);
return c;
}
// second filtered derivative - SecDer
__device__ float CCTProfilsEvaluationSP_Device::SecondDer(float x, int iProfil)
{
return Derivatives(x, iProfil, &fillCannyCoeffsCUDA);
}
__device__ float SecondDer(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U, float* filterCoeffs, float* tempVector , const float& dynSigma , const float& shoch2 )
{
return Derivatives( x, p, profile_16U, filterCoeffs, tempVector , dynSigma , shoch2 , &fillCannyCoeffsCUDA);
}
// third filtered derivative - ThirdDer
__device__ float CCTProfilsEvaluationSP_Device::ThirdDer(float x, int iProfil)
{
return -Derivatives(x, iProfil, &fillSecDerCoeffsCUDA);
}
// third filtered derivative - ThirdDer
__device__ float ThirdDer(float x, const ProfileEvaluationConstants& p, unsigned short* profile_16U , float* filterCoeffs , float* tempVector, const float& dynSigma, const float& shoch2)
{
return -Derivatives(x, p , profile_16U, filterCoeffs , tempVector, dynSigma , shoch2, &fillSecDerCoeffsCUDA);
}
// base function for the filtered derivatives of the gray-value profile
__device__ float CCTProfilsEvaluationSP_Device::Derivatives(float x, int iProfil, int(*callback)(float*, float, int, float*))
{
//assert(sigma > 0.0);
actFilterLength = int(rangeFactor * dynSigma / voxelStep);
//std::cout << "act filter length : " << actFilterLength<<" "<<dynSigma << std::endl;
//assert(actFilterLength <= coeffLength);
filterIndex = int(floor(x / voxelStep)) + zeroIndex - actFilterLength; // index of the start of the filter mask
//assert(filterIndex >= 0 && filterIndex + 2 * actFilterLength + 1 < length);
filterCoeffs[0] = (float)((filterIndex - zeroIndex + 0.5)*voxelStep - x);
for (ii = 1; ii < 2 * actFilterLength + 1; ii++)
{
filterCoeffs[ii] = filterCoeffs[ii - 1] + (float)voxelStep;
//printf("%f ", filterCoeffs[ii]);
}
callback(filterCoeffs, shoch2, 2 * actFilterLength, tempVector);
auto dat = profile_16U + iProfil * length + filterIndex;
ippsCopy_32f(profile_16U + iProfil * length + filterIndex, tempVector, 2 * actFilterLength + 1);
ippsSub_32f_I(profile_16U + iProfil * length + filterIndex + 1, tempVector, 2 * actFilterLength + 1);
result = 0;
ippsDotProd_32f(tempVector, filterCoeffs, 2 * actFilterLength, &result);
return -result;
}
__device__ float CCTProfilsEvaluationSP_Device::CannyOpt(int i, int iProfil)
{
//assert(i >= coeffLength && i + coeffLength < length);
result = 0;
ippsDotProd_32f(profile_16U + iProfil * length + i - coeffLength, gaussCoeffs, 2 * coeffLength + 1, &result);
return result;
}
__device__ float CannyOpt(int i , const ProfileEvaluationConstants& p, unsigned short* profile_16U)
{
//assert(i >= coeffLength && i + coeffLength < length);
float result = 0;
ippsDotProd_32f(profile_16U + i - p.coeffLength, p.gaussCoeffs, 2 * p.coeffLength + 1, &result);
return result;
}
__device__ float CCTProfilsEvaluationSP_Device::SecDerOpt(int i, int iProfil)
{
//assert(i >= coeffLength && i + coeffLength < length);
result = 0;
ippsDotProd_32f(profile_16U + iProfil * length + i - coeffLength, cannyCoeffs, 2 * coeffLength + 1, &result);
return result;
}
__device__ float SecDerOpt(int i , const ProfileEvaluationConstants& p, unsigned short* profile_16U )
{
//assert(i >= coeffLength && i + coeffLength < length);
float result = 0;
ippsDotProd_32f( profile_16U + i - p.coeffLength , p.cannyCoeffs , 2 * p.coeffLength + 1 , &result );
return result;
}
__device__ int CCTProfilsEvaluationSP_Device::FoldCannyOpt(int iProfil, float *cannyProfile)
{
//assert(cannyProfile);
//assert(zeroIndex - searchRangeNeg >= coeffLength && zeroIndex + searchRange + coeffLength < length);
ippsConv_32f(profile_16U + iProfil * length + zeroIndex - searchRangeNeg - coeffLength, 2 * coeffLength + searchRange + searchRangeNeg + 1, gaussCoeffs, 2 * coeffLength + 1, cannyProfile);
return searchRangeNeg + 2 * coeffLength; // this is the zero index
}
__device__ int FoldCannyOpt(const ProfileEvaluationConstants& p, unsigned short* profile_16U, float *cannyProfile)
{
//assert(cannyProfile);
//assert(zeroIndex - searchRangeNeg >= coeffLength && zeroIndex + searchRange + coeffLength < length);
ippsConv_32f( profile_16U + p.zeroIndex - p.searchRangeNeg - p.coeffLength,
2 * p.coeffLength + p.searchRange + p.searchRangeNeg + 1,
p.gaussCoeffs, 2 * p.coeffLength + 1, cannyProfile);
return p.searchRangeNeg + 2 * p.coeffLength; // this is the zero index
}
__device__ int CCTProfilsEvaluationSP_Device::FoldSecDerOpt(int iProfil, float *secDerProfile)
{
//assert(secDerProfile);
//assert(zeroIndex - searchRangeNeg >= coeffLength && zeroIndex + searchRange + coeffLength <= length);
ippsConv_32f( profile_16U + iProfil * length + zeroIndex - searchRangeNeg - coeffLength ,
2 * coeffLength + searchRange + searchRangeNeg + 1, cannyCoeffs, 2 * coeffLength + 1, secDerProfile);
//printf("%d %d %d \n", zeroIndex - searchRangeNeg - coeffLength, (2 * coeffLength + searchRange + searchRangeNeg + 1), 2 * coeffLength + 1);
return searchRangeNeg + 2 * coeffLength; // this is the zero index
}
__device__ int FoldSecDerOpt( const ProfileEvaluationConstants& p, unsigned short* profile_16U , float *secDerProfile)
{
//assert(secDerProfile);
//assert(zeroIndex - searchRangeNeg >= coeffLength && zeroIndex + searchRange + coeffLength <= length);
ippsConv_32f(profile_16U + p.zeroIndex - p.searchRangeNeg - p.coeffLength,
2 * p.coeffLength + p.searchRange + p.searchRangeNeg + 1, p.cannyCoeffs, 2 * p.coeffLength + 1, secDerProfile);
//printf("%d %d %d \n", zeroIndex - searchRangeNeg - coeffLength, (2 * coeffLength + searchRange + searchRangeNeg + 1), 2 * coeffLength + 1);
return p.searchRangeNeg + 2 * p.coeffLength; // this is the zero index
}
__device__ int CCTProfilsEvaluationSP_Device::FoldThirdDerOpt(int iProfil, float *thirdDerProfile, int convRangeNeg, int convRangePos)
{
//assert(thirdDerProfile);
if (!convRangeNeg || zeroIndex - convRangeNeg < coeffLength)
convRangeNeg = zeroIndex - coeffLength;
if (!convRangePos || zeroIndex + convRangePos + coeffLength >= length)
convRangePos = length - coeffLength - zeroIndex - 1;
//assert(zeroIndex - convRangeNeg >= coeffLength && zeroIndex + convRangePos + coeffLength < length);
ippsConv_32f(profile_16U + iProfil * length + zeroIndex - convRangeNeg - coeffLength,
2 * coeffLength + convRangePos + convRangeNeg + 1, secDerCoeffs,
2 * coeffLength + 1, thirdDerProfile);
return convRangeNeg + 2 * coeffLength; // this is the zero index
}
__device__ int FoldThirdDerOpt( const ProfileEvaluationConstants& p , unsigned short* profile_16U, float *thirdDerProfile, int convRangeNeg, int convRangePos)
{
//assert(thirdDerProfile);
if (!convRangeNeg || p.zeroIndex - convRangeNeg < p.coeffLength)
convRangeNeg = p.zeroIndex - p.coeffLength;
if (!convRangePos || p.zeroIndex + convRangePos + p.coeffLength >= p.length)
convRangePos = p.length - p.coeffLength - p.zeroIndex - 1;
//assert(zeroIndex - convRangeNeg >= coeffLength && zeroIndex + convRangePos + coeffLength < length);
ippsConv_32f( profile_16U + p.zeroIndex - convRangeNeg - p.coeffLength,
2 * p.coeffLength + convRangePos + convRangeNeg + 1, p.secDerCoeffs,
2 * p.coeffLength + 1, thirdDerProfile);
return convRangeNeg + 2 * p.coeffLength; // this is the zero index
}
// direct put dyn Sigma
__device__ void CCTProfilsEvaluationSP_Device::PutDynSigma(float newValue)
{
dynSigma = newValue;
shoch2 = dynSigma * dynSigma;
}
__device__ void PutDynSigma( const ProfileEvaluationConstants&p , float newValue , float& dynSigma , float& shoch2 )
{
dynSigma = newValue;
shoch2 = dynSigma * dynSigma;
}
// limit the dynamic p.sigma (smaller than p.sigma and > 0.75)
__device__ bool SetDynSigma( CCTProfilsEvaluationSP_Device& p , float x, int iProfil)
{
// DPVector::const_iterator i;
float curThreshold = -0.1f*p.Canny(x, iProfil);
bool minBegrenzung = true, maxBegrenzung = true;
float minIndex = x, maxIndex = x, xx;
// search for the negative turning point in the profile with 10% tolerance
do
{
minIndex -= p.voxelStep / 4;
} while (p.Canny(minIndex, iProfil) > curThreshold &&
(minIndex - x < 4 * p.sigma) &&
(minIndex / p.voxelStep > -p.searchRangeNeg));
// check for a real opposite edge above 50% height
xx = minIndex;
do
{
xx -= p.voxelStep / 4;
if (x - xx > 4 * p.sigma || (xx / p.voxelStep <= -p.searchRangeNeg))
break;
} while (minBegrenzung = (p.Canny(xx, iProfil) > 5 * curThreshold));
// search for the positive turning point in the profile with 10% tolerance
curThreshold = -0.1f*p.Canny(x, iProfil);
do
{
maxIndex += p.voxelStep / 4;
} while (p.Canny(maxIndex, iProfil) > curThreshold &&
(maxIndex - x < 4 * p.sigma) &&
(maxIndex / p.voxelStep > p.searchRange));
// check for a real opposite edge above 50% height
xx = maxIndex;
do
{
xx += p.voxelStep / 4;
if (xx - x > 4 * p.sigma || xx / p.voxelStep >= p.searchRange)
break;
} while (maxBegrenzung = (p.Canny(xx, iProfil) > 5 * curThreshold));
// if there is an opposite edge, restrict p.sigma to the distance to the turning point
// THE FACTOR 4.0 IS EXPERIMENTAL
if (!(minBegrenzung && maxBegrenzung))
p.dynSigma = (float)((maxIndex - x) < (x - minIndex) ? (maxIndex - x) : (x - minIndex)) / 4.0f;
else
{
p.dynSigma = p.sigma;
p.shoch2 = p.dynSigma* p.dynSigma;
return false;
}
// limit the range
if (p.dynSigma > p.sigma)
{
p.dynSigma = p.sigma;
p.shoch2 = p.dynSigma* p.dynSigma;
return false;
}
if (p.dynSigma < 0.35f)
p.dynSigma = 0.35f;
p.shoch2 = p.dynSigma* p.dynSigma;
return true;
}
__device__ bool SetDynSigma(const ProfileEvaluationConstants& p, float x, unsigned short* profile_16U, float* filterBuffer, float* tempVector, float& dynSigma, float& shoch2 )
{
// DPVector::const_iterator i;
float curThreshold = -0.1f * Canny(x, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2 ); //p.Canny(x, iProfil);
bool minBegrenzung = true, maxBegrenzung = true;
float minIndex = x, maxIndex = x, xx;
// search for the negative turning point in the profile with 10% tolerance
do
{
minIndex -= p.voxelStep / 4;
} while ( Canny(minIndex, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2) > curThreshold && //while (p.Canny(minIndex, iProfil) > curThreshold &&
(minIndex - x < 4 * p.sigma) &&
(minIndex / p.voxelStep > -p.searchRangeNeg));
// check for a real opposite edge above 50% height
xx = minIndex;
do
{
xx -= p.voxelStep / 4;
if (x - xx > 4 * p.sigma || (xx / p.voxelStep <= -p.searchRangeNeg))
break;
} while (minBegrenzung = (Canny(xx, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2) > 5 * curThreshold));
// search for the positive turning point in the profile with 10% tolerance
curThreshold = -0.1f*Canny(x, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2);
do
{
maxIndex += p.voxelStep / 4;
} while (Canny(maxIndex, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2) > curThreshold &&
(maxIndex - x < 4 * p.sigma) &&
(maxIndex / p.voxelStep > p.searchRange));
// check for a real opposite edge above 50% height
xx = maxIndex;
do
{
xx += p.voxelStep / 4;
if (xx - x > 4 * p.sigma || xx / p.voxelStep >= p.searchRange)
break;
} while (maxBegrenzung = (Canny(xx, p, profile_16U, filterBuffer, tempVector, dynSigma, shoch2) > 5 * curThreshold));
// if there is an opposite edge, restrict p.sigma to the distance to the turning point
// THE FACTOR 4.0 IS EXPERIMENTAL
if (!(minBegrenzung && maxBegrenzung))
dynSigma = (float)((maxIndex - x) < (x - minIndex) ? (maxIndex - x) : (x - minIndex)) / 4.0f;
else
{
dynSigma = p.sigma;
shoch2 = dynSigma * dynSigma;
return false;
}
// limit the range
if ( dynSigma > p.sigma)
{
dynSigma = p.sigma;
shoch2 = dynSigma * dynSigma;
return false;
}
if ( dynSigma < 0.35f)
dynSigma = 0.35f;
shoch2 = dynSigma * dynSigma;
return true;
}
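// NewtonMax (both overloads below): Newton iteration on the second derivative, i.e. it drives
// SecondDer(x) to zero using ThirdDer(x) as the slope, which places x on the local maximum of the
// Canny gradient; the step is damped on the first iteration and the loop aborts on divergence or
// after 25 iterations.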
__device__ bool NewtonMax( CCTProfilsEvaluationSP_Device& p , float& x, int iProfil)
{
bool result = true;
float start_x = x;
float z;
int it = 0;
float lastZ;
//printf("start x : %f \n", start_x);
do
{
z = p.ThirdDer(x, iProfil);
if (z == 0) {
result = false;
break;
}
z = p.SecondDer(x, iProfil) / z; // new step size
//printf("z %f : ", z);
if (it == 0 && fabs(z) > 1.0f)
z *= 0.1f;
if (fabs(z) > 3.0f) // apparently not converging; bound determined empirically
{
result = false;
break;
}
if (it > 0 && std::abs(z + lastZ) < 0.01f)
z *= 0.5f;
x = x - z; // apply the correction
//printf("%f ", x);
lastZ = z;
if (it++ > 25) // guard against endless iteration
{
result = false;
break;
}
} while (fabs(z) > 0.001); // 0.001 is in voxel units and should be sufficient
//printf("\n");
if (!result)
x = start_x;
return result;
}
__device__ bool NewtonMax( const ProfileEvaluationConstants& p, float& x, unsigned short* profile_16U, float* filterBuffer, float* tempVector, const float& dynSigma, const float& shoch2)
{
bool result = true;
float start_x = x;
float z;
int it = 0;
float lastZ;
//printf("start x : %f \n", start_x);
do
{
z = ThirdDer(x, p , profile_16U , filterBuffer , tempVector, dynSigma, shoch2);
if (z == 0) {
result = false;
break;
}
z = SecondDer(x, p, profile_16U, filterBuffer, tempVector, dynSigma , shoch2) / z; //p.SecondDer(x, iProfil) / z; // new step size
if (it == 0 && fabs(z) > 1.0f)
z *= 0.1f;
if (fabs(z) > 3.0f) // apparently not converging; bound determined empirically
{
result = false;
break;
}
if (it > 0 && std::abs(z + lastZ) < 0.01f)
z *= 0.5f;
x = x - z; // apply the correction
//printf("%f ", x);
lastZ = z;
if (it++ > 25) // guard against endless iteration
{
result = false;
break;
}
} while (fabs(z) > 0.001); // 0.001 is in voxel units and should be sufficient
//printf("\n ", x);
if (!result)
x = start_x;
return result;
}
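// GradientLength (both overloads below): estimates the width of the gray-value transition at x by
// locating the zero crossings of the third derivative (the inflection points of the gradient) on
// both sides of x and returning their distance; if requested, the Gaussian-smoothed gray values at
// those two positions are reported via gaussLow/gaussHigh.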
__device__ float GradientLength(CCTProfilsEvaluationSP_Device& p, float x, int iProfil, float* gaussLow, float* gaussHigh, float xCanny)
{
if (xCanny == 0 && p.ptValid[iProfil])
xCanny = p.resCanny[iProfil];
int sign = 1;
if (xCanny < 0) sign = -1; // downward step (relevant for multi-material)
// search for the parameter at 50% of xCanny (maximum value)
int iLow = (int)floor((x) / p.voxelStep);
int iBase = iLow;
while (sign * p.SecDerOpt(iLow + p.zeroIndex, iProfil) > -0.25*sign * xCanny / p.dynSigma && (iBase - iLow) * p.voxelStep <= 5.0 && (iLow + p.zeroIndex > p.coeffLength))
iLow--;
if (!((iBase - iLow)*p.voxelStep <= 5.0))
iLow = iBase - 1;
int iHigh = iBase + 1;
while (sign*p.SecDerOpt(iHigh + p.zeroIndex, iProfil) < 0.25*sign*xCanny / p.dynSigma && (iHigh - iBase)*p.voxelStep < 5.0 && (iHigh + p.zeroIndex < p.length - p.coeffLength - 1))
iHigh++;
if (!((iHigh - iBase)*p.voxelStep < 5.0))
iHigh = iBase + 1;
// convolve the third derivative +/- 10 voxels around x
int searchRangeRoot = int(10.0 / p.voxelStep);
int coeffDistance = int(p.coeffLength / p.voxelStep);
if (p.zeroIndex + iBase - searchRangeRoot <= coeffDistance)
searchRangeRoot = p.zeroIndex + iBase - coeffDistance;
if (p.zeroIndex + iBase + searchRangeRoot >= p.length - coeffDistance)
searchRangeRoot = searchRangeRoot - (p.zeroIndex + iBase + coeffDistance);
int foldZeroIndex = p.FoldThirdDerOpt(iProfil, p.tempConvProfile, -iBase + searchRangeRoot, iBase + searchRangeRoot);
// search for a zero of the third derivative on the air side
iHigh += foldZeroIndex;
iLow += foldZeroIndex;
iBase += foldZeroIndex;
float x_vw = 0.0, x_rw = 0.0; // hits of the forward and backward search
bool hit_vw = false, hit_rw = false; // indicators for hits against the threshold
// loop with simultaneous forward and backward search
while (1)
{
// test search range and sign change of the 2nd derivative
if ((iHigh - iBase) * p.voxelStep <= searchRangeRoot * p.voxelStep && sign*p.tempConvProfile[iHigh + 1] < 0 && sign*p.tempConvProfile[iHigh]>0)
{
// interpolate the forward hit
x_vw = (iHigh + p.tempConvProfile[iHigh] / (p.tempConvProfile[iHigh] - p.tempConvProfile[iHigh + 1]) - foldZeroIndex)*p.voxelStep;
int iTest = (int)floor(x_vw / p.voxelStep + 0.5);
float t = sign * p.CannyOpt(/*iHigh - foldZeroIndex*/iTest + p.zeroIndex, iProfil);
if (t > 0.05*sign*xCanny && t<0.85*sign*xCanny && sign*p.SecDerOpt(/*iHigh - foldZeroIndex*/iTest + p.zeroIndex, iProfil)>0.15*sign*xCanny / p.dynSigma) hit_vw = true;
}
// test search range and sign change of the 2nd derivative
if ((iBase - iLow)*p.voxelStep <= searchRangeRoot * p.voxelStep && sign*p.tempConvProfile[iLow] > 0 && sign*p.tempConvProfile[iLow - 1] < 0)
{
// interpolate the backward hit
x_rw = (iLow - p.tempConvProfile[iLow] / (p.tempConvProfile[iLow] - p.tempConvProfile[iLow - 1]) - foldZeroIndex)*p.voxelStep;
int iTest = (int)floor(x_rw / p.voxelStep + 0.5);
float t = sign * p.CannyOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, iProfil);
if (t > 0.05*sign*xCanny && t < 0.85*sign*xCanny && sign*p.SecDerOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, iProfil) < -0.15*sign*xCanny / p.dynSigma) hit_rw = true;
}
if (hit_vw && hit_rw)
break; // both limits found
if ((iBase - iLow)*p.voxelStep >= searchRangeRoot * p.voxelStep || (iHigh - iBase)*p.voxelStep >= searchRangeRoot * p.voxelStep)
break; // search range exhausted
iHigh++; iLow--;
}
if (hit_vw && hit_rw)
{
if (sign == -1)
{
if (gaussLow) *gaussLow = p.Gauss(x_vw, iProfil);
if (gaussHigh) *gaussHigh = p.Gauss(x_rw, iProfil);
}
else
{
if (gaussLow) *gaussLow = p.Gauss(x_rw, iProfil);
if (gaussHigh) *gaussHigh = p.Gauss(x_vw, iProfil);
}
return x_vw - x_rw; // the difference between the inflection points is the desired characteristic value
}
else
{
if (gaussLow) *gaussLow = 0;
if (gaussHigh) *gaussHigh = 0;
return 0.0;
}
}
__device__ float GradientLength( const ProfileEvaluationConstants& p, float x, unsigned short* profile_16U , float* tempConvProfile , float* filterBuffer, float* tempVector,
bool& ptValid , float& resCanny , float* gaussLow, float* gaussHigh, float xCanny, const float& dynSigma, const float& shoch2)
{
/*if (xCanny == 0 && p.ptValid[iProfil])
xCanny = p.resCanny[iProfil];
*/
if (xCanny == 0 && ptValid)
xCanny = resCanny;
int sign = 1;
if (xCanny < 0) sign = -1; // downward step (relevant for multi-material)
// search for the parameter at 50% of xCanny (maximum value)
int iLow = (int)floor((x) / p.voxelStep);
int iBase = iLow;
//while (sign * p.SecDerOpt(iLow + p.zeroIndex, iProfil) > -0.25*sign * xCanny / p.dynSigma && (iBase - iLow) * p.voxelStep <= 5.0 && (iLow + p.zeroIndex > p.coeffLength))
while (sign * SecDerOpt(iLow + p.zeroIndex, p , profile_16U) > -0.25*sign * xCanny / dynSigma && (iBase - iLow) * p.voxelStep <= 5.0 && (iLow + p.zeroIndex > p.coeffLength))
iLow--;
if (!((iBase - iLow)*p.voxelStep <= 5.0))
iLow = iBase - 1;
int iHigh = iBase + 1;
//while (sign*p.SecDerOpt(iHigh + p.zeroIndex, iProfil) < 0.25*sign*xCanny / p.dynSigma && (iHigh - iBase)*p.voxelStep < 5.0 && (iHigh + p.zeroIndex < p.length - p.coeffLength - 1))
while ( sign * SecDerOpt( iHigh + p.zeroIndex, p, profile_16U ) < 0.25*sign*xCanny / dynSigma && (iHigh - iBase)*p.voxelStep < 5.0 && (iHigh + p.zeroIndex < p.length - p.coeffLength - 1))
iHigh++;
if (!((iHigh - iBase)*p.voxelStep < 5.0))
iHigh = iBase + 1;
// Faltung dritte Ableitung +/- 10 Voxel um x
int searchRangeRoot = int(10.0 / p.voxelStep);
int coeffDistance = int(p.coeffLength / p.voxelStep);
if (p.zeroIndex + iBase - searchRangeRoot <= coeffDistance)
searchRangeRoot = p.zeroIndex + iBase - coeffDistance;
if (p.zeroIndex + iBase + searchRangeRoot >= p.length - coeffDistance)
searchRangeRoot = searchRangeRoot - (p.zeroIndex + iBase + coeffDistance);
int foldZeroIndex = FoldThirdDerOpt(p , profile_16U, tempConvProfile, -iBase + searchRangeRoot, iBase + searchRangeRoot); //p.FoldThirdDerOpt(iProfil, p.tempConvProfile, -iBase + searchRangeRoot, iBase + searchRangeRoot);
// Suche nach Nullstelle in dritter Ableitung Luftseite
iHigh += foldZeroIndex;
iLow += foldZeroIndex;
iBase += foldZeroIndex;
float x_vw = 0.0, x_rw = 0.0; // hits of the forward and backward search
bool hit_vw = false, hit_rw = false; // indicators for hits against the threshold
// loop with simultaneous forward and backward search
while (1)
{
// Test Suchbereich und Vorzeichenwechsel 2.Abl.
if ((iHigh - iBase) * p.voxelStep <= searchRangeRoot * p.voxelStep && sign * tempConvProfile[iHigh + 1] < 0 && sign * tempConvProfile[iHigh]>0)
{
// Interpolation Treffer vorwärts
x_vw = ( iHigh + tempConvProfile[iHigh] / ( tempConvProfile[iHigh] - tempConvProfile[iHigh + 1] ) - foldZeroIndex)*p.voxelStep;
int iTest = (int)floor(x_vw / p.voxelStep + 0.5);
float t = sign * CannyOpt(iTest + p.zeroIndex, p, profile_16U); //p.CannyOpt(/*iHigh - foldZeroIndex*/iTest + p.zeroIndex, iProfil);
//if (t > 0.05*sign*xCanny && t<0.85*sign*xCanny && sign*p.SecDerOpt(/*iHigh - foldZeroIndex*/iTest + p.zeroIndex, iProfil)>0.15*sign*xCanny / p.dynSigma)
if (t > 0.05*sign*xCanny && t<0.85*sign*xCanny && sign * SecDerOpt(iTest + p.zeroIndex, p , profile_16U) > 0.15*sign*xCanny / dynSigma)
hit_vw = true;
}
// Test Suchbereich und Vorzeichenwechsel 2.Abl.
if ((iBase - iLow)*p.voxelStep <= searchRangeRoot * p.voxelStep && sign * tempConvProfile[iLow] > 0 && sign * tempConvProfile[iLow - 1] < 0)
{
// Interpolation Treffer rückwärts
x_rw = (iLow - tempConvProfile[iLow] / ( tempConvProfile[iLow] - tempConvProfile[iLow - 1]) - foldZeroIndex)*p.voxelStep;
int iTest = (int)floor(x_rw / p.voxelStep + 0.5);
float t = sign * CannyOpt(iTest + p.zeroIndex, p, profile_16U); //p.CannyOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, iProfil);
//if (t > 0.05*sign*xCanny && t < 0.85*sign*xCanny && sign*p.SecDerOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, iProfil) < -0.15*sign*xCanny / p.dynSigma)
if (t > 0.05*sign*xCanny && t < 0.85*sign*xCanny && sign * SecDerOpt(/*iLow - foldZeroIndex*/iTest + p.zeroIndex, p , profile_16U) < -0.15*sign*xCanny / dynSigma)
hit_rw = true;
}
if (hit_vw && hit_rw)
break; // beide Grenzen gefunden
if ((iBase - iLow)*p.voxelStep >= searchRangeRoot * p.voxelStep || (iHigh - iBase)*p.voxelStep >= searchRangeRoot * p.voxelStep)
break; // Suchbereich abgegrast
iHigh++; iLow--;
}
if (hit_vw && hit_rw)
{
if (sign == -1)
{
if (gaussLow) *gaussLow = Gauss( x_vw, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2 ); //p.Gauss(x_vw, iProfil);
if (gaussHigh) *gaussHigh = Gauss( x_rw, p, profile_16U, filterBuffer, tempVector, dynSigma , shoch2 ); ////p.Gauss(x_rw, iProfil);
}
else
{
if (gaussLow) *gaussLow = Gauss(x_rw, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); // //p.Gauss(x_rw, iProfil);
if (gaussHigh) *gaussHigh = Gauss(x_vw, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); // //p.Gauss(x_vw, iProfil);
}
return x_vw - x_rw; // the difference between the inflection points is the desired characteristic value
}
else
{
if (gaussLow) *gaussLow = 0;
if (gaussHigh) *gaussHigh = 0;
return 0.0;
}
}
__device__ bool SearchAroundZero(CCTProfilsEvaluationSP_Device& p, float& x, int iProfil, float fSearchRange, float fSearchRangeNeg, float staticTest,
float airPointsThresh, bool dynControl, int sign)
{
//std::cout << "range factor : " << p.rangeFactor << std::endl;
bool result = true;
//assert(p.threshold > 0.0);
//assert(p.tempConvLength > 2 * p.coeffLength + p.searchRange + p.searchRangeNeg);
//assert(p.dynSigma > 0.3);
p.PutDynSigma(p.sigma); // reset for every point!
// reset the dynamic threshold if necessary
if (!p.dynThresholdControl)
p.dynThreshold = p.threshold;
if (p.dynThreshold > p.threshold)
p.dynThreshold = p.threshold;
p.resQuality[iProfil] = -1.0;
float x_vw = 0.0, x_rw = 0.0; // hits of the forward and backward search
// reuse an existing result if available
if (p.ptValid[iProfil] != true || p.results[iProfil] > 1e6)
{
p.ptValid[iProfil] = false;
//Fold second derivative over entire search area
p.convProfileZeroIndex = p.FoldSecDerOpt(iProfil, p.tempConvProfile);
int i_vw = p.convProfileZeroIndex, i_rw = p.convProfileZeroIndex; //Index of forward and backward search
bool hit_vw = false, hit_rw = false; //Threshold hit indicators
//std::cout << "convolution profile : " << p.tempConvProfile[100] << " " << p.tempConvProfile[150] << " " << p.tempConvProfile[250] << std::endl;
//printf("convolution profile gpu : %f %f %f \n : ", p.tempConvProfile[100], p.tempConvProfile[150], p.tempConvProfile[250]);
//Loop with equal forward and reverse sweep
while (1)
{
// Test search range and sign change 2.Abl.
// It is tested until the successor of i_vw, if there is no sign change,
// then no zero - at the whole coordinates of the opt-folding is exact!
if (i_vw - p.convProfileZeroIndex < p.searchRange - 1 &&
sign * p.tempConvProfile[i_vw + 1] > 0 &&
sign * p.tempConvProfile[i_vw] < 0)
{
//Interpolation hits forward
x_vw = (i_vw + p.tempConvProfile[i_vw] / (p.tempConvProfile[i_vw] - p.tempConvProfile[i_vw + 1]) - p.convProfileZeroIndex) * p.voxelStep;
//printf(" canny vw : %f ", p.Canny(x_vw, iProfil));
if (sign * p.Canny(x_vw, iProfil) > sign * p.dynThreshold) // threshold criterion
{
if (!hit_vw && !hit_rw)
{
hit_vw = true;
x = x_vw;
}
else
p.resQuality[iProfil] = 50.0;
}
}
//Test search range and sign change 2.Abl.
if (p.convProfileZeroIndex - i_rw < p.searchRangeNeg - 1 && sign * p.tempConvProfile[i_rw] > 0 && sign * p.tempConvProfile[i_rw - 1] < 0)
{
//Interpolation hits backwards
x_rw = (i_rw - p.tempConvProfile[i_rw] / (p.tempConvProfile[i_rw] - p.tempConvProfile[i_rw - 1]) - p.convProfileZeroIndex) * p.voxelStep;
//printf(" canny : %f ", p.Canny(x_rw, iProfil));
if (sign * p.Canny(x_rw, iProfil) > sign * p.dynThreshold) //threshold criterion
if (!hit_rw && !hit_vw)
{
hit_rw = true;
x = x_rw;
}
else if (hit_vw && !hit_rw)
{
hit_rw = true;
x = (x < -x_rw) ? x : x_rw;
}
else p.resQuality[iProfil] = 50.0;
}
if (!dynControl && (hit_vw || hit_rw))
break; //Landed hits
i_vw++; i_rw--;
if (i_vw - p.convProfileZeroIndex > p.searchRange && p.convProfileZeroIndex - i_rw > p.searchRangeNeg)
break; //Search area browsed
}
if (!hit_vw && !hit_rw)
result = false;
printf("\n hit found : %f %d %d %d \n", x_vw, hit_vw, hit_rw, result);
printf("dynamic threshold %f %d %d %d %f \n", p.dynThreshold, sign, p.convProfileZeroIndex, p.searchRangeNeg , p.voxelStep);
}
else x = p.results[iProfil];
if (result && dynControl)
result = NewtonMax( p , x, iProfil); // point located precisely?? Result in x!!!
printf("\n newton max : %f %d %f %f\n", x, result , p.dynSigma , p.shoch2);
if (result)
if (-x > fSearchRangeNeg || x > fSearchRange)
result = false;
while (result) // precise determination of the zero crossing succeeded
{
bool dynCorr = false;
if (dynControl)
dynCorr = SetDynSigma( p , x, iProfil);
if (dynCorr)
{
result = NewtonMax( p , x, iProfil);
p.dynThreshold = p.dynSigma / p.sigma*p.threshold; // also adapt the threshold, heuristically...
if (!result)
break;
}
p.resCanny[iProfil] = p.Canny(x, iProfil);
if ((sign*p.resCanny[iProfil] < sign*p.dynThreshold) // gradient threshold exceeded?
|| (x > fSearchRange)
|| (x < -fSearchRangeNeg))
{
result = false;
break;
}
float actGradLength = 0;
bool staticChecked = false;
// Check against the static threshold
if (dynControl)
{
float high, low;
// Compute gradient step length and endpoints
actGradLength = GradientLength( p , x, iProfil, &low, &high, p.resCanny[iProfil]);
if (low > 0 && high > 0)
staticChecked = true;
if (staticChecked && staticTest > 0)
{
if (staticTest > high || staticTest < low)
{
result = false;
break;
}
}
}
// If the gradient length computation did not work, or dynControl is off (nominal/actual comparison)
if (!staticChecked && staticTest > 0)
{
float lowValue = p.Gauss(x - 2 * p.sigma, iProfil);
float highValue = p.Gauss(x + 2 * p.sigma, iProfil);
if (lowValue > staticTest || highValue < staticTest)
{
result = false;
break;
}
}
// Air point test
if (airPointsThresh > 0)
{
float grayActual = p.Gauss(x, iProfil);
if (grayActual < airPointsThresh)
{
result = false;
break;
}
}
// Set dynamic p.threshold to 75% of this point's maximum
if (p.dynThresholdControl)
p.dynThreshold = (float)fabs(p.Canny(x, iProfil)) * 3 / 4;
// But not larger than the preset threshold
if (p.dynThreshold > p.threshold)
p.dynThreshold = p.threshold;
p.ptValid[iProfil] = true;
if (dynControl)
{
if (p.resQuality[iProfil] < 0)
p.resQuality[iProfil] = 0.0;
if (p.resCanny[iProfil] < 2 * p.threshold)
p.resQuality[iProfil] += 25 * (2 * p.threshold - p.resCanny[iProfil]) / p.threshold;
actGradLength = __min(actGradLength, 4.0f * p.dynSigma);
if (actGradLength > 2 * p.dynSigma)
p.resQuality[iProfil] += 12 * (actGradLength - 2 * p.dynSigma) / p.dynSigma;
}
p.results[iProfil] = x;
break;
}
if (!result)
p.ptValid[iProfil] = false;
return result;
}
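// Note on the sub-voxel localization used above and in the device variant below: between two
// consecutive samples of the second-derivative profile with opposite signs, the zero crossing is
// estimated by linear interpolation,
// x = (i + f(i) / (f(i) - f(i+1)) - convProfileZeroIndex) * voxelStep,
// which is exactly what the x_vw / x_rw expressions compute for the forward and backward sweeps.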
__device__ bool SearchAroundZero( const ProfileEvaluationConstants& p, unsigned short* profile_16U , float* tempConvProfile, float* filterBuffer , float* tempVector , float& x, int iProfil, float fSearchRange, float fSearchRangeNeg, float staticTest,
float airPointsThresh, bool dynControl, int sign , bool& ptValid , float& resCanny, float& resQuality , float& result, float& dynSigma, float& shoch2)
{
//std::cout << "range factor : " << p.rangeFactor << std::endl;
bool isValid = true;
//assert(p.threshold > 0.0);
//assert(p.tempConvLength > 2 * p.coeffLength + p.searchRange + p.searchRangeNeg);
//assert(p.dynSigma > 0.3);
float dynThreshold = p.dynThreshold1;
//p.PutDynSigma(p.sigma); // TODO can be done a priori
PutDynSigma(p, p.sigma, dynSigma, shoch2);
//
// // Possibly reset the dynamic threshold
if (!p.dynThresholdControl) // TODO can be done a priori
dynThreshold = p.threshold;
if (dynThreshold > p.threshold) // TODO can be done a priori
dynThreshold = p.threshold;
resQuality = -1.0; // should be a parameter
float x_vw = 0.0, x_rw = 0.0; // Hits of the forward and backward search
// Reuse an existing result if available
if ( ptValid != true || result > 1e6) // should be function parameter
{
ptValid = false;
//Fold second derivative over entire search area
int convProfileZeroIndex = FoldSecDerOpt( p , profile_16U , tempConvProfile );
int i_vw = convProfileZeroIndex, i_rw = convProfileZeroIndex; //Index of forward and backward search
bool hit_vw = false, hit_rw = false; //Threshold hit indicators
//std::cout << "convolution profile : " << p.tempConvProfile[100] << " " << p.tempConvProfile[150] << " " << p.tempConvProfile[250] << std::endl;
//printf("convolution profile gpu : %f %f %f \n : ", p.tempConvProfile[100], p.tempConvProfile[150], p.tempConvProfile[250]);
//Loop with equal forward and reverse sweep
while (1)
{
// Test search range and sign change of the 2nd derivative.
// We test up to the successor of i_vw; if there is no sign change up to there,
// there is no zero crossing - the optimized convolution is exact at the integer coordinates!
if ( i_vw - convProfileZeroIndex < p.searchRange - 1 &&
sign * tempConvProfile[i_vw + 1] > 0 &&
sign * tempConvProfile[i_vw] < 0)
{
//Interpolation hits forward
x_vw = (i_vw + tempConvProfile[i_vw] / ( tempConvProfile[i_vw] - tempConvProfile[i_vw + 1]) - convProfileZeroIndex) * p.voxelStep;
//printf(" canny vw : %f ", p.Canny(x_vw, iProfil));
//if (sign * p.Canny(x_vw, iProfil) > sign * p.dynThreshold) // threshold criterion
if (sign * Canny( x_vw , p , profile_16U , filterBuffer , tempVector , dynSigma , shoch2) > sign * dynThreshold) // threshold criterion
{
if (!hit_vw && !hit_rw)
{
hit_vw = true;
x = x_vw;
}
else
resQuality = 50.0;
}
}
//Test search range and sign change of the 2nd derivative
if ( convProfileZeroIndex - i_rw < p.searchRangeNeg - 1 && sign * tempConvProfile[i_rw] > 0 && sign * tempConvProfile[i_rw - 1] < 0)
{
//Interpolation hits backwards
x_rw = (i_rw - tempConvProfile[i_rw] / ( tempConvProfile[i_rw] - tempConvProfile[i_rw - 1]) - convProfileZeroIndex) * p.voxelStep;
//printf(" canny : %f ", p.Canny(x_rw, iProfil));
if (sign * Canny( x_rw, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2) > sign * dynThreshold) //threshold criterion
if (!hit_rw && !hit_vw)
{
hit_rw = true;
x = x_rw;
}
else if (hit_vw && !hit_rw)
{
hit_rw = true;
x = (x < -x_rw) ? x : x_rw;
}
else
resQuality = 50.0;
}
if (!dynControl && (hit_vw || hit_rw))
break; //Landed hits
i_vw++; i_rw--;
if (i_vw - convProfileZeroIndex > p.searchRange && convProfileZeroIndex - i_rw > p.searchRangeNeg)
break; //Search area browsed
}
if (!hit_vw && !hit_rw)
isValid = false;
//printf("\n hit found : %f %d %d %d \n", x_vw, hit_vw, hit_rw, isValid);
//printf("dynamic threshold %f %d %d %d %f \n", dynThreshold, sign, convProfileZeroIndex, p.searchRangeNeg , p.voxelStep);
}
else x = result;
if (isValid && dynControl)
isValid = NewtonMax( p , x , profile_16U, filterBuffer , tempVector , dynSigma , shoch2 ); //NewtonMax(p, x, iProfil); // Point located precisely? Result returned in x
if (isValid)
{
result = x;
}
return isValid;
#if 0
//printf("\n newton max : %f %d %f %f \n", x , isValid , dynSigma , shoch2);
if (isValid)
if (-x > fSearchRangeNeg || x > fSearchRange)
isValid = false;
while (isValid) // Exact zero-crossing determination succeeded
{
bool dynCorr = false;
if (dynControl)
dynCorr = SetDynSigma(p, x, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //SetDynSigma(p, x, iProfil);
if (dynCorr)
{
isValid = NewtonMax(p, x, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //NewtonMax(p, x, iProfil);
dynThreshold = dynSigma / p.sigma * p.threshold; // Also adapt the threshold, heuristically...
if (!isValid)
break;
}
resCanny = Canny(x, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //p.Canny(x, iProfil);
if ((sign * resCanny < sign * dynThreshold) // Gradient threshold exceeded?
|| (x > fSearchRange)
|| (x < -fSearchRangeNeg))
{
isValid = false;
break;
}
float actGradLength = 0;
bool staticChecked = false;
// Check against the static threshold
if (dynControl)
{
float high, low;
// Compute gradient step length and endpoints
actGradLength = GradientLength( p, x, profile_16U, tempConvProfile, filterBuffer, tempVector, ptValid, resCanny, &low, &high, resCanny , dynSigma , shoch2 ); //GradientLength(p, x, iProfil, &low, &high, p.resCanny[iProfil]);
if (low > 0 && high > 0)
staticChecked = true;
if (staticChecked && staticTest > 0)
{
if (staticTest > high || staticTest < low)
{
isValid = false;
break;
}
}
}
// If the gradient length computation did not work, or dynControl is off (nominal/actual comparison)
if (!staticChecked && staticTest > 0)
{
float lowValue = Gauss(x - 2 * p.sigma, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //p.Gauss(x - 2 * p.sigma, iProfil);
float highValue = Gauss(x + 2 * p.sigma, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); //p.Gauss(x + 2 * p.sigma, iProfil);
if (lowValue > staticTest || highValue < staticTest)
{
isValid = false;
break;
}
}
// Air point test
if (airPointsThresh > 0)
{
float grayActual = Gauss( x , p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2); // //p.Gauss(x, iProfil);
if (grayActual < airPointsThresh)
{
isValid = false;
break;
}
}
// Set dynamic p.threshold to 75% of this point's maximum
if (p.dynThresholdControl)
dynThreshold = (float)fabs(Canny(x, p, profile_16U, filterBuffer, tempVector , dynSigma , shoch2)) * 3 / 4; //(float)fabs(p.Canny(x, iProfil)) * 3 / 4;
// But not larger than the preset threshold
if (dynThreshold > p.threshold)
dynThreshold = p.threshold;
ptValid = true;
if (dynControl)
{
if ( resQuality < 0)
resQuality = 0.0;
if ( resCanny < 2 * p.threshold)
resQuality += 25 * (2 * p.threshold - resCanny ) / p.threshold;
actGradLength = __min(actGradLength, 4.0f * dynSigma);
if (actGradLength > 2 * dynSigma)
resQuality += 12 * (actGradLength - 2 * dynSigma) / dynSigma;
}
result = x;
break;
}
if ( !isValid )
ptValid = false;
return isValid;
#endif
}
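// The kernel below carves its per-thread working buffers out of dynamic shared memory via
// pointer arithmetic: the Gauss/Canny/2nd-derivative coefficient tables (3 x 48 floats), one raw
// profile per thread, one convolution workspace per thread, and per-thread filter coefficients
// plus a temp vector; the coefficient tables and the ProfileEvaluationConstants block are staged
// cooperatively by the block's threads. The matching size is computed in the host launcher below.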
__global__ void profileGradientMaxima_Kernel( unsigned short* prof, float* kernelData ,
int profileSize , int kernelSize , int numProfiles , float fSearchRange , float fSearchRangeNeg , int wB, float* results , bool* ptValid )
{
int y = threadIdx.y + blockIdx.y * blockDim.y;
int profileId = (y * wB + blockIdx.x) * blockDim.x + threadIdx.x;
if (profileId >= numProfiles)
return;
extern __shared__ float sharedKernelMemory[144];
extern __shared__ ProfileEvaluationConstants profileEvalParams;
//printf("%d \n", sizeof(ProfileEvaluationConstants));
int* profBuff = (int*)&profileEvalParams;
if (threadIdx.x < 16)
{
memcpy(sharedKernelMemory + 2 * threadIdx.x , constGaussKernelData.data + 2 * threadIdx.x , 2 * sizeof(float));
memcpy(sharedKernelMemory + 48 + 2 * threadIdx.x, constCannyKernelData.data + 2 * threadIdx.x, 2 * sizeof(float));
memcpy(sharedKernelMemory + 2 * 48 + 2 * threadIdx.x, constSecDerKernelData.data + 2 * threadIdx.x, 2 * sizeof(float));
}
else
{
memcpy(sharedKernelMemory + 16 + threadIdx.x, constGaussKernelData.data + 16 + threadIdx.x, sizeof(float));
memcpy(sharedKernelMemory + 48 + 16 + threadIdx.x, constCannyKernelData.data + 16 + threadIdx.x, sizeof(float));
memcpy(sharedKernelMemory + 2 * 48 + 16 + threadIdx.x, constSecDerKernelData.data + 16 + threadIdx.x, sizeof(float));
}
memcpy(profBuff + threadIdx.x * 2, profileEvaluatorData.data + 8 * threadIdx.x, 8);//copy 8 bytes per thread
__syncthreads();
//printf(" %d \n", profileEvalParams.length);
//CCTProfilsEvaluationSP_Device profileEvaluation;
//auto ped = (unsigned char*)&profileEvaluation;
//int evalSize = sizeof(CCTProfilsEvaluationSP_Device);
//memcpy( ped , profileEvaluatorData.data , evalSize );
extern __shared__ unsigned short profileData[];
unsigned short* currentProfile = profileData + threadIdx.x * profileSize;
//printf(" profile size : %d \n", profileSize);
memcpy( currentProfile , prof + profileId * profileSize, profileSize * sizeof(unsigned short));
float* tempConvolutionData = (float*)( profileData + blockDim.x * profileSize );
float *kernelDataShared = (float*)( tempConvolutionData + blockDim.x * profileEvalParams.tempConvLength );
float* currentConvolutionData = tempConvolutionData + threadIdx.x * profileEvalParams.tempConvLength;
float resQuality, resCanny , result;
bool ptValidLocal;
//profileEvaluation.resQuality = &resQuality;
//profileEvaluation.resCanny = &resCanny;
//profileEvaluation.ptValid = &ptValidLocal;
//profileEvaluation.results = &result;
//profileEvaluation.ptValid[0] = false;
//profileEvaluation.results[0] = 0;
//profileEvaluation.tempConvProfile = currentConvolutionData;
//profileEvaluation.gaussCoeffs = sharedKernelMemory;
//profileEvaluation.cannyCoeffs = sharedKernelMemory + 48;
//profileEvaluation.secDerCoeffs = sharedKernelMemory + 2 * 48;
//profileEvaluation.filterCoeffs = kernelDataShared + threadIdx.x * kernelSize * 2;
//profileEvaluation.tempVector = profileEvaluation.filterCoeffs + kernelSize;
//profileEvaluation.profile_16U = currentProfile;
float xx = 0;
float* filterCoeffs = kernelDataShared + threadIdx.x * kernelSize * 2;
float* tempVector = filterCoeffs + kernelSize;
//ptValid[profileId] = SearchAroundZero( profileEvaluation , xx, 0, fSearchRange, fSearchRangeNeg, -1, -1, true, true);
float dynSigma = profileEvalParams.dynSigma1, shoch2 = profileEvalParams.shoch21;
profileEvalParams.gaussCoeffs = sharedKernelMemory;
profileEvalParams.cannyCoeffs = sharedKernelMemory + 48;
profileEvalParams.secDerCoeffs = sharedKernelMemory + 2 * 48;
//if (threadIdx.x == 0)
//{
// ptValid[profileId] = SearchAroundZero(profileEvaluation, xx, 0, fSearchRange, fSearchRangeNeg, -1, -1, true, true);
// printf("value of xx1 : %f %d \n", xx, ptValid[profileId]);
//float xx = 0;
result = 0;
ptValidLocal = false;
ptValid[profileId] = SearchAroundZero(profileEvalParams, currentProfile, currentConvolutionData, filterCoeffs, tempVector, xx, 0, fSearchRange,
fSearchRangeNeg, -1, -1, true, true, ptValidLocal, resCanny, resQuality, result, dynSigma, shoch2);
results[profileId] = xx;
//printf("value of xx2 : %f %d \n", xx, ptValid[profileId]);
//}
}
__global__ void Simple_Kernel()
{
printf("simple kernel \n");
}
void computeGradientBasedMaximaPoints( void* cpuProfiles , unsigned short* cpuProfileData , float* gaussKernelData ,
float* cannyKernelData , float *secDerKernelData , int numProfiles, int profileLength ,
int tempConvLength , int filterKernelSize , int singleProfileEvaluatorSize ,
int coeffLength , int searchRangeNeg , int zeroIndex , int searchRange )
{
//global memory for storing the profiles
unsigned short* profileMemoryDevice = 0;
//printf(" single profile evaluator size : %d \n", singleProfileEvaluatorSize);
//printf("gaussKernelData : %f %f %f %f %f \n", gaussKernelData[0], gaussKernelData[3], gaussKernelData[7], gaussKernelData[17], gaussKernelData[31]);
//printf("cannyKernelData : %f %f %f %f %f \n", cannyKernelData[0], cannyKernelData[3], cannyKernelData[7], cannyKernelData[17], cannyKernelData[31]);
//printf("secDerCoeffs : %f %f %f %f %f \n", secDerKernelData[0], secDerKernelData[3], secDerKernelData[7], secDerKernelData[17], secDerKernelData[31]);
//printf(" single profile evaluator size %d ", singleProfileEvaluatorSize);
HANDLE_ERROR(cudaMemcpyToSymbol( profileEvaluatorData, cpuProfiles, singleProfileEvaluatorSize, 0 , cudaMemcpyHostToDevice ));
HANDLE_ERROR(cudaMemcpyToSymbol( constGaussKernelData, gaussKernelData, filterKernelSize * sizeof(float), 0, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpyToSymbol( constCannyKernelData, cannyKernelData, filterKernelSize * sizeof(float), 0, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpyToSymbol( constSecDerKernelData, secDerKernelData, filterKernelSize * sizeof(float), 0, cudaMemcpyHostToDevice));
int shift = zeroIndex - searchRangeNeg - coeffLength;
int validLen = (2 * coeffLength + searchRange + searchRangeNeg + 1);
unsigned short* validProfileData = new unsigned short[ validLen * numProfiles ];
for ( int ii = 0; ii < numProfiles; ii++ )
{
memcpy(validProfileData + validLen * ii, cpuProfileData + ii * profileLength + shift , sizeof(unsigned short) * validLen);
}
//cudaMalloc((void**)&profileMemoryDevice, numProfiles * profileLength * sizeof(unsigned short));
cudaMalloc((void**)&profileMemoryDevice, numProfiles * validLen * sizeof(unsigned short));
//cudaMemcpy(profileMemoryDevice, cpuProfiles, singleProfileSize * numProfiles, cudaMemcpyHostToDevice);
//cudaMemcpy(profileMemoryDevice, cpuProfileData, numProfiles * profileLength * sizeof(unsigned short), cudaMemcpyHostToDevice);
cudaMemcpy( profileMemoryDevice, validProfileData, numProfiles * validLen * sizeof(unsigned short), cudaMemcpyHostToDevice);
int groupSize = 32;
dim3 threads(groupSize, 1);
float* resultsGPU;
bool* ptValidGPU;
cudaMalloc( (void**)&resultsGPU, numProfiles * sizeof(float));
cudaMalloc( (void**)&ptValidGPU, numProfiles * sizeof(bool));
int wB = 1024;//1;//
int nProfileSets = numProfiles / groupSize;
int nXBatches = 1;
if ( nProfileSets > wB)
{
nXBatches = nProfileSets % wB == 0 ? nProfileSets / wB : nProfileSets / wB + 1;
}
dim3 blocks(wB , nXBatches );//nXBatches
//tempConvLength = std::max(tempConvLength, 474);
printf("temp convolution length %d : \n", tempConvLength);
int sharedMemorySize = ( ( validLen * sizeof(unsigned short) + tempConvLength * sizeof(float) + 2 * filterKernelSize * sizeof(float)) * groupSize ) + 48 * 3 + 256 ;
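// The size above accounts for, per block: one trimmed profile (validLen shorts) per thread, one
// convolution workspace (tempConvLength floats) per thread, and filter coefficients plus a temp
// vector (2 * filterKernelSize floats) per thread, with an extra 48 * 3 + 256 bytes of headroom
// for the coefficient tables and the ProfileEvaluationConstants block copied inside the kernel.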
printf("shared memory size %d \n ", sharedMemorySize);
//we need all the shared memory for computation
float* variableKernelData;
printf("number of blocks %d \n ", wB * nXBatches);
//Simple_Kernel << <1, 1 >> > ();
int* profileCountGPU;
cudaMalloc( (void**)&profileCountGPU, sizeof(int));
cudaMemset(profileCountGPU, 0, sizeof(int));
profileGradientMaxima_Kernel <<< blocks, threads, sharedMemorySize >>> ( profileMemoryDevice , variableKernelData, validLen ,
filterKernelSize, numProfiles, 40, 40, wB, resultsGPU, ptValidGPU);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("cuda kernel failure\n");
}
else
{
printf("kernel executed successfully \n");
}
HANDLE_ERROR(error);
//HANDLE_ERROR(cudaDeviceSynchronize());
int profileCountCPU = 0;
cudaMemcpy(&profileCountCPU, profileCountGPU, sizeof(int), cudaMemcpyDeviceToHost);
printf("profile count gpu %d , actual number of profiles : %d : ", profileCountCPU, numProfiles);
}
}
}
} |
af6bdfea8705ffd1f7efc205b1d3afe391824656.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
// Must be multiples of 16 for wmma code to work
#define MATRIX_M 16384
#define MATRIX_N 16384
#define MATRIX_K 16384
__global__ void convertFp32ToFp16 (half *out, float *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
int i,j;
float *a_fp32;
float *b_fp32;
half *a_fp16;
half *b_fp16;
float *c;
float *c_cublas;
float *c_cuda;
float *c_host_cublas;
float *c_host_cuda;
FILE * fp = fopen("./cuda.txt","w");
FILE * tp = fopen("./tensor.txt","w");
hiprandGenerator_t gen;
hipblasHandle_t cublasHandle;
hipEvent_t startCUDA;
hipEvent_t stopCUDA;
hipEvent_t startcublas;
hipEvent_t stopcublas;
cudaErrCheck(hipEventCreate(&startCUDA));
cudaErrCheck(hipEventCreate(&stopCUDA));
cudaErrCheck(hipEventCreate(&startcublas));
cudaErrCheck(hipEventCreate(&stopcublas));
cublasErrCheck(hipblasCreate(&cublasHandle));
// Switch to CUDA cores, step 1: change CUBLAS_TENSOR_OP_MATH to CUBLAS_DEFAULT_MATH.
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));//
// Memory allocation
cudaErrCheck(hipMalloc((void**)&a_fp32, MATRIX_M * MATRIX_K * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&b_fp32, MATRIX_K * MATRIX_N * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&a_fp16, MATRIX_M * MATRIX_K * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&b_fp16, MATRIX_K * MATRIX_N * sizeof(half)));
cudaErrCheck(hipMalloc((void**)&c, MATRIX_M * MATRIX_N * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_cublas, MATRIX_M * MATRIX_N * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_cuda, MATRIX_M * MATRIX_N * sizeof(float)));
c_host_cublas = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
c_host_cuda = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
curandErrCheck(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(hiprandSetPseudoRandomGeneratorSeed(gen, 1337ULL));
curandErrCheck(hiprandGenerateUniform(gen, a_fp32, MATRIX_M * MATRIX_K));
curandErrCheck(hiprandGenerateUniform(gen, b_fp32, MATRIX_K * MATRIX_N));
// hiprand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_fp16, a_fp32, MATRIX_M * MATRIX_K);
hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_fp16, b_fp32, MATRIX_K * MATRIX_N);
curandErrCheck(hiprandGenerateUniform(gen, c, MATRIX_M * MATRIX_N));
curandErrCheck(hiprandDestroyGenerator(gen));
cudaErrCheck(hipMemcpy(c_cublas, c, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToDevice));
cudaErrCheck(hipMemcpy(c_cuda, c, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToDevice));
float alpha = 2.0f;
float beta = 2.0f;
printf("\nM = %d, N = %d, K = %d. alpha = %f, beta = %f\n\n", MATRIX_M, MATRIX_N, MATRIX_K, alpha, beta);
// Now using cuBLAS with CUDA
printf("Running with cuBLAS with Cuda Core\n");
cudaErrCheck(hipEventRecord(startCUDA));
// Switch to CUDA cores, step 2:
// when using CUDA cores, change CUBLAS_GEMM_DFALT_TENSOR_OP to CUBLAS_GEMM_DFALT.
for(i=0;i<1000;i++){
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp32, HIP_R_32F, MATRIX_M,
b_fp32, HIP_R_32F, MATRIX_K,
&beta,
c_cuda, HIP_R_32F, MATRIX_M,
HIP_R_32F, CUBLAS_GEMM_DFALT));
}
cudaErrCheck(hipEventRecord(stopCUDA));
// Error checking
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));//
// Now using cuBLAS with Tensor
printf("Running with cuBLAS with Tensor Core\n");
cudaErrCheck(hipEventRecord(startcublas));
// Switch to CUDA cores, step 2:
// when using CUDA cores, change CUBLAS_GEMM_DFALT_TENSOR_OP to CUBLAS_GEMM_DFALT.
for(j=0;j<1000;j++){
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp16, HIP_R_16F, MATRIX_M,
b_fp16, HIP_R_16F, MATRIX_K,
&beta,
c_cublas, HIP_R_32F, MATRIX_M,
HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(hipEventRecord(stopcublas));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(hipMemcpy(c_host_cuda, c_cuda, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToHost));
cudaErrCheck(hipMemcpy(c_host_cublas, c_cublas, MATRIX_M * MATRIX_N * sizeof(float), hipMemcpyDeviceToHost));
printf("Results.\n\n");
float cudaTime;
float cublasTime;
cudaErrCheck(hipEventSynchronize(stopCUDA));
cudaErrCheck(hipEventSynchronize(stopcublas));
cudaErrCheck(hipEventElapsedTime(&cudaTime, startCUDA, stopCUDA));
cudaErrCheck(hipEventElapsedTime(&cublasTime, startcublas, stopcublas));
// Print TFLOPS calculation results
printf("cuda took %fms\n", cudaTime);
printf("tensor took %fms\n", cublasTime);
printf("\nCUBALS WITH CUDA OR TENSOR CORE CODE !\n\n");
cudaErrCheck(hipEventDestroy(startCUDA));
cudaErrCheck(hipEventDestroy(stopCUDA));
cudaErrCheck(hipEventDestroy(startcublas));
cudaErrCheck(hipEventDestroy(stopcublas));
cudaErrCheck(hipFree(a_fp32));
cudaErrCheck(hipFree(b_fp32));
cudaErrCheck(hipFree(a_fp16));
cudaErrCheck(hipFree(b_fp16));
cudaErrCheck(hipFree(c));
cudaErrCheck(hipFree(c_cublas));
cudaErrCheck(hipFree(c_cuda));
free(c_host_cublas);
free(c_host_cuda);
cudaErrCheck(hipDeviceReset());
return 0;
}
| af6bdfea8705ffd1f7efc205b1d3afe391824656.cu | #include <stdio.h>
#include <curand.h>
#include <cublas_v2.h>
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
// Must be multiples of 16 for wmma code to work
#define MATRIX_M 16384
#define MATRIX_N 16384
#define MATRIX_K 16384
__global__ void convertFp32ToFp16 (half *out, float *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
int i,j;
float *a_fp32;
float *b_fp32;
half *a_fp16;
half *b_fp16;
float *c;
float *c_cublas;
float *c_cuda;
float *c_host_cublas;
float *c_host_cuda;
FILE * fp = fopen("./cuda.txt","w");
FILE * tp = fopen("./tensor.txt","w");
curandGenerator_t gen;
cublasHandle_t cublasHandle;
cudaEvent_t startCUDA;
cudaEvent_t stopCUDA;
cudaEvent_t startcublas;
cudaEvent_t stopcublas;
cudaErrCheck(cudaEventCreate(&startCUDA));
cudaErrCheck(cudaEventCreate(&stopCUDA));
cudaErrCheck(cudaEventCreate(&startcublas));
cudaErrCheck(cudaEventCreate(&stopcublas));
cublasErrCheck(cublasCreate(&cublasHandle));
// Switch to CUDA cores, step 1: change CUBLAS_TENSOR_OP_MATH to CUBLAS_DEFAULT_MATH.
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH));//
// Memory allocation
cudaErrCheck(cudaMalloc((void**)&a_fp32, MATRIX_M * MATRIX_K * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&b_fp32, MATRIX_K * MATRIX_N * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&a_fp16, MATRIX_M * MATRIX_K * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&b_fp16, MATRIX_K * MATRIX_N * sizeof(half)));
cudaErrCheck(cudaMalloc((void**)&c, MATRIX_M * MATRIX_N * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_cublas, MATRIX_M * MATRIX_N * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_cuda, MATRIX_M * MATRIX_N * sizeof(float)));
c_host_cublas = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
c_host_cuda = (float*)malloc(MATRIX_M * MATRIX_N * sizeof(float));
curandErrCheck(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(curandSetPseudoRandomGeneratorSeed(gen, 1337ULL));
curandErrCheck(curandGenerateUniform(gen, a_fp32, MATRIX_M * MATRIX_K));
curandErrCheck(curandGenerateUniform(gen, b_fp32, MATRIX_K * MATRIX_N));
// curand doesn't currently support fp16 so we generate in fp32 and convert to fp16.
convertFp32ToFp16 <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_fp16, a_fp32, MATRIX_M * MATRIX_K);
convertFp32ToFp16 <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_fp16, b_fp32, MATRIX_K * MATRIX_N);
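// Launch-configuration note: (count + 255) / 256 is the usual ceiling division, so every element
// gets a thread even when the element count is not a multiple of the 256-thread block size; the
// idx < n guard inside convertFp32ToFp16 discards the overhang.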
curandErrCheck(curandGenerateUniform(gen, c, MATRIX_M * MATRIX_N));
curandErrCheck(curandDestroyGenerator(gen));
cudaErrCheck(cudaMemcpy(c_cublas, c, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToDevice));
cudaErrCheck(cudaMemcpy(c_cuda, c, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToDevice));
float alpha = 2.0f;
float beta = 2.0f;
printf("\nM = %d, N = %d, K = %d. alpha = %f, beta = %f\n\n", MATRIX_M, MATRIX_N, MATRIX_K, alpha, beta);
// Now using cuBLAS with CUDA
printf("Running with cuBLAS with Cuda Core\n");
cudaErrCheck(cudaEventRecord(startCUDA));
// Switch to CUDA cores, step 2:
// when using CUDA cores, change CUBLAS_GEMM_DFALT_TENSOR_OP to CUBLAS_GEMM_DFALT.
for(i=0;i<1000;i++){
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp32, CUDA_R_32F, MATRIX_M,
b_fp32, CUDA_R_32F, MATRIX_K,
&beta,
c_cuda, CUDA_R_32F, MATRIX_M,
CUDA_R_32F, CUBLAS_GEMM_DFALT));
}
cudaErrCheck(cudaEventRecord(stopCUDA));
// Error checking
cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH));//
// Now using cuBLAS with Tensor
printf("Running with cuBLAS with Tensor Core\n");
cudaErrCheck(cudaEventRecord(startcublas));
// Switch to CUDA cores, step 2:
// when using CUDA cores, change CUBLAS_GEMM_DFALT_TENSOR_OP to CUBLAS_GEMM_DFALT.
for(j=0;j<1000;j++){
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
MATRIX_M, MATRIX_N, MATRIX_K,
&alpha,
a_fp16, CUDA_R_16F, MATRIX_M,
b_fp16, CUDA_R_16F, MATRIX_K,
&beta,
c_cublas, CUDA_R_32F, MATRIX_M,
CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP));
}
cudaErrCheck(cudaEventRecord(stopcublas));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(cudaMemcpy(c_host_cuda, c_cuda, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToHost));
cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToHost));
printf("Results.\n\n");
float cudaTime;
float cublasTime;
cudaErrCheck(cudaEventSynchronize(stopCUDA));
cudaErrCheck(cudaEventSynchronize(stopcublas));
cudaErrCheck(cudaEventElapsedTime(&cudaTime, startCUDA, stopCUDA));
cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas));
// Print TFLOPS calculation results
printf("cuda took %fms\n", cudaTime);
printf("tensor took %fms\n", cublasTime);
printf("\nCUBALS WITH CUDA OR TENSOR CORE CODE !\n\n");
cudaErrCheck(cudaEventDestroy(startCUDA));
cudaErrCheck(cudaEventDestroy(stopCUDA));
cudaErrCheck(cudaEventDestroy(startcublas));
cudaErrCheck(cudaEventDestroy(stopcublas));
cudaErrCheck(cudaFree(a_fp32));
cudaErrCheck(cudaFree(b_fp32));
cudaErrCheck(cudaFree(a_fp16));
cudaErrCheck(cudaFree(b_fp16));
cudaErrCheck(cudaFree(c));
cudaErrCheck(cudaFree(c_cublas));
cudaErrCheck(cudaFree(c_cuda));
free(c_host_cublas);
free(c_host_cuda);
cudaErrCheck(cudaDeviceReset());
return 0;
}
|
f120a296df808aff20cfcbb29cea33cdb1305535.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = y[i]+x[i];
}
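// Note: despite the SAXPY name, this kernel ignores the scalar a and computes y[i] = y[i] + x[i];
// with x and y both initialized to 1.0f in main below, every result element is expected to be
// 2.0f, which is what the maxError check verifies.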
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 1.0f; // also initialize y so d_y holds defined data before the kernel reads it
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-2.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
| f120a296df808aff20cfcbb29cea33cdb1305535.cu | #include <cuda.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = y[i]+x[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 1.0f; // also initialize y so d_y holds defined data before the kernel reads it
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-2.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
9d90eb78d0d3abc2cff65ab841a753731edcfff6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <stdio.h>
#include<cuda.h>
#include<iostream>
#include<cmath>
#include<time.h>
#define TILE_WIDTH 4//block size
double duration_gpu, duration_cpu, duration_kernel, duration_cpumem;
using namespace std;
__host__
void matrix_mul_seq(double* a, double* b, double* p, int r1, int w, int c2)
{
clock_t start = clock();
for (int i = 0; i < r1; i++)
for (int j = 0; j < c2; j++) {
double sum = 0;
for (int k = 0; k < w; k++) {
double x = a[i *w + k];
double y = b[k * c2 + j];
sum += x * y;
}
p[i * c2 + j] = sum;
}
clock_t stop = clock();
duration_cpu = (double)(stop - start) / CLOCKS_PER_SEC;
cout << " time spent by cpu in seconds : " << duration_cpu << endl;
}
__global__
void MatrixMulKernel(double* M, double* N,
double* P, int Width, int r1, int c2)
{
// Calculate the row index of the P element and M
int i = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate the column index of P and N
int j = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ double Ms[TILE_WIDTH][TILE_WIDTH];
__shared__ double Ns[TILE_WIDTH][TILE_WIDTH];
for (int Row = i; Row < gridDim.y * blockDim.y * 2 + threadIdx.y; Row += gridDim.y * blockDim.y) {
for (int Col = j; Col < gridDim.x * blockDim.x * 2 + threadIdx.x; Col += gridDim.x * blockDim.x) {
double Pvalue = 0;
// Each thread computes one element of the block sub-matrix
for (int k = 0; k < (Width + TILE_WIDTH - 1) / TILE_WIDTH; ++k) {
if (k * TILE_WIDTH + threadIdx.x < Width && Row < r1)
Ms[threadIdx.y][threadIdx.x] = M[Row * Width + k * TILE_WIDTH + threadIdx.x];
else
Ms[threadIdx.y][threadIdx.x] = 0.0;
if (k * TILE_WIDTH + threadIdx.y < Width && Col < c2)
Ns[threadIdx.y][threadIdx.x] = N[(k * TILE_WIDTH + threadIdx.y) * c2 + Col];
else
Ns[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int n = 0; n < TILE_WIDTH; n++) {
//if (n + (k * TILE_WIDTH) < Width)
Pvalue += Ms[threadIdx.y][n] * Ns[n][threadIdx.x];
}
__syncthreads();
}
if (Row < r1 && Col < c2)
{
P[Row * c2 + Col] = Pvalue;
}
}
}
}
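// Kernel notes: each block stages TILE_WIDTH x TILE_WIDTH tiles of M and N in shared memory and
// accumulates partial dot products tile by tile; the outer Row/Col loops additionally make each
// block cover a 2 x 2 arrangement of output tiles, which is why the host grid below is sized with
// ceil(dim / (TILE_WIDTH * 2)). Because the GPU accumulates in a different order than the
// sequential CPU loop, the exact element-wise comparison in main can report differences that are
// only floating-point rounding noise.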
void matrix_mul_parallel(double* h_a, double* h_b, double* h_p, int r1, int w, int c2)
{
int size_a = r1 * w * sizeof(double);
int size_b = w * c2 * sizeof(double);
int size_p = r1 * c2 * sizeof(double);
double* d_a, *d_b, *d_p;
hipError_t err = hipMalloc((void**)&d_a, size_a);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_a, h_a, size_a, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void**)&d_b, size_b);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_b, h_b, size_b, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void**)&d_p, size_p);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid(ceil(c2 / float(dimBlock.x * 2)), (ceil(r1 / float(dimBlock.y * 2))));
clock_t start = clock();
MatrixMulKernel << <dimGrid, dimBlock >> > (d_a, d_b, d_p, w, r1, c2);
hipDeviceSynchronize();
clock_t stop = clock();
duration_kernel = (double)(stop - start) / CLOCKS_PER_SEC;
cout << " time spent by the kernel in seconds : " << duration_kernel << endl;
err = hipMemcpy(h_p, d_p, size_p, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
hipFree(d_a); hipFree(d_b); hipFree(d_p);
}
int main()
{
srand(unsigned(time(0)));
int r1, w, c2;
cout << "Enter rows for first matrix: ";
cin >> r1;
cout << "Enter columns of first matrix which is the same as rows for second matrix: ";
cin >> w;
cout << "Enter columns for second matrix: ";
cin >> c2;
int size_a = r1 * w;
int size_b = w * c2;
int size_p = r1 * c2;
clock_t start = clock();
double* a = new double[size_a];
double* b = new double[size_b];
double* p = new double[size_p];
double* d_p = new double[size_p];
clock_t stop = clock();
duration_cpumem = (double)(stop - start) / CLOCKS_PER_SEC;
// initializing elements of first matrix.
for (int i = 0; i < r1; i++)
for (int j = 0; j < w; j++)
{
a[i * w + j] = double(rand()) / (double(RAND_MAX / LLONG_MAX) + 1.0);
}
// initializing elements of second matrix.
srand(unsigned(time(0)));
for (int i = 0; i < w; i++)
for (int j = 0; j < c2; j++)
{
b[i * c2 + j] = double(rand()) / (double(RAND_MAX / LLONG_MAX) + 1.0);
}
// Initializing elements of matrix p to 0.
for (int i = 0; i < r1; i++)
for (int j = 0; j < c2; j++)
{
p[i * c2 + j] = 0;
}
//calling the sequential function
matrix_mul_seq(a, b, p, r1, w, c2);
duration_cpumem += duration_cpu;
cout << " time spent by the CPU with memory : " << duration_cpumem << endl;
// calling the parallel function
start = clock();
matrix_mul_parallel(a, b, d_p, r1, w, c2);
hipDeviceSynchronize();
stop = clock();
duration_gpu = (double)(stop - start) / CLOCKS_PER_SEC;
cout << " time spent by the kernel in seconds : " << duration_gpu << endl;
unsigned long long int counter = 0;;
for (int i = 0; i < r1; ++i)
for (int j = 0; j < c2; ++j)
{
counter += (d_p[i * c2 + j] != p[i * c2 + j]);
}
printf("There are %ld different elements\n", counter);
printf("speedup without memory: %lf\n", duration_cpu / duration_kernel);
printf("speedup with memory: %lf\n", duration_cpumem / duration_gpu);
system("pause");
return 0;
} | 9d90eb78d0d3abc2cff65ab841a753731edcfff6.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <stdio.h>
#include<cuda.h>
#include<iostream>
#include<cmath>
#include<time.h>
#define TILE_WIDTH 4//block size
double duration_gpu, duration_cpu, duration_kernel, duration_cpumem;
using namespace std;
__host__
void matrix_mul_seq(double* a, double* b, double* p, int r1, int w, int c2)
{
clock_t start = clock();
for (int i = 0; i < r1; i++)
for (int j = 0; j < c2; j++) {
double sum = 0;
for (int k = 0; k < w; k++) {
double x = a[i *w + k];
double y = b[k * c2 + j];
sum += x * y;
}
p[i * c2 + j] = sum;
}
clock_t stop = clock();
duration_cpu = (double)(stop - start) / CLOCKS_PER_SEC;
cout << " time spent by cpu in seconds : " << duration_cpu << endl;
}
__global__
void MatrixMulKernel(double* M, double* N,
double* P, int Width, int r1, int c2)
{
// Calculate the row index of the P element and M
int i = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate the column index of P and N
int j = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ double Ms[TILE_WIDTH][TILE_WIDTH];
__shared__ double Ns[TILE_WIDTH][TILE_WIDTH];
for (int Row = i; Row < gridDim.y * blockDim.y * 2 + threadIdx.y; Row += gridDim.y * blockDim.y) {
for (int Col = j; Col < gridDim.x * blockDim.x * 2 + threadIdx.x; Col += gridDim.x * blockDim.x) {
double Pvalue = 0;
// Each thread computes one element of the block sub-matrix
for (int k = 0; k < (Width + TILE_WIDTH - 1) / TILE_WIDTH; ++k) {
if (k * TILE_WIDTH + threadIdx.x < Width && Row < r1)
Ms[threadIdx.y][threadIdx.x] = M[Row * Width + k * TILE_WIDTH + threadIdx.x];
else
Ms[threadIdx.y][threadIdx.x] = 0.0;
if (k * TILE_WIDTH + threadIdx.y < Width && Col < c2)
Ns[threadIdx.y][threadIdx.x] = N[(k * TILE_WIDTH + threadIdx.y) * c2 + Col];
else
Ns[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int n = 0; n < TILE_WIDTH; n++) {
//if (n + (k * TILE_WIDTH) < Width)
Pvalue += Ms[threadIdx.y][n] * Ns[n][threadIdx.x];
}
__syncthreads();
}
if (Row < r1 && Col < c2)
{
P[Row * c2 + Col] = Pvalue;
}
}
}
}
void matrix_mul_parallel(double* h_a, double* h_b, double* h_p, int r1, int w, int c2)
{
int size_a = r1 * w * sizeof(double);
int size_b = w * c2 * sizeof(double);
int size_p = r1 * c2 * sizeof(double);
double* d_a, *d_b, *d_p;
cudaError_t err = cudaMalloc((void**)&d_a, size_a);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_a, h_a, size_a, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**)&d_b, size_b);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_b, h_b, size_b, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**)&d_p, size_p);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid(ceil(c2 / float(dimBlock.x * 2)), (ceil(r1 / float(dimBlock.y * 2))));
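// Grid sizing note: dividing by dimBlock * 2 halves the grid in each dimension because every
// block processes a 2 x 2 group of output tiles via the Row/Col loops inside MatrixMulKernel.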
clock_t start = clock();
MatrixMulKernel << <dimGrid, dimBlock >> > (d_a, d_b, d_p, w, r1, c2);
cudaDeviceSynchronize();
clock_t stop = clock();
duration_kernel = (double)(stop - start) / CLOCKS_PER_SEC;
cout << " time spent by the kernel in seconds : " << duration_kernel << endl;
err = cudaMemcpy(h_p, d_p, size_p, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cudaFree(d_a); cudaFree(d_b); cudaFree(d_p);
}
int main()
{
srand(unsigned(time(0)));
int r1, w, c2;
cout << "Enter rows for first matrix: ";
cin >> r1;
cout << "Enter columns of first matrix which is the same as rows for second matrix: ";
cin >> w;
cout << "Enter columns for second matrix: ";
cin >> c2;
int size_a = r1 * w;
int size_b = w * c2;
int size_p = r1 * c2;
clock_t start = clock();
double* a = new double[size_a];
double* b = new double[size_b];
double* p = new double[size_p];
double* d_p = new double[size_p];
clock_t stop = clock();
duration_cpumem = (double)(stop - start) / CLOCKS_PER_SEC;
// initializing elements of first matrix.
for (int i = 0; i < r1; i++)
for (int j = 0; j < w; j++)
{
a[i * w + j] = double(rand()) / (double(RAND_MAX / LLONG_MAX) + 1.0);
}
// initializing elements of second matrix.
srand(unsigned(time(0)));
for (int i = 0; i < w; i++)
for (int j = 0; j < c2; j++)
{
b[i * c2 + j] = double(rand()) / (double(RAND_MAX / LLONG_MAX) + 1.0);
}
// Initializing elements of matrix p to 0.
for (int i = 0; i < r1; i++)
for (int j = 0; j < c2; j++)
{
p[i * c2 + j] = 0;
}
//calling the sequential function
matrix_mul_seq(a, b, p, r1, w, c2);
duration_cpumem += duration_cpu;
cout << " time spent by the CPU with memory : " << duration_cpumem << endl;
// calling the parallel function
start = clock();
matrix_mul_parallel(a, b, d_p, r1, w, c2);
cudaDeviceSynchronize();
stop = clock();
duration_gpu = (double)(stop - start) / CLOCKS_PER_SEC;
cout << " time spent by the kernel in seconds : " << duration_gpu << endl;
unsigned long long int counter = 0;;
for (int i = 0; i < r1; ++i)
for (int j = 0; j < c2; ++j)
{
counter += (d_p[i * c2 + j] != p[i * c2 + j]);
}
printf("There are %ld different elements\n", counter);
printf("speedup without memory: %lf\n", duration_cpu / duration_kernel);
printf("speedup with memory: %lf\n", duration_cpumem / duration_gpu);
system("pause");
return 0;
} |
deafa5d16c7454ffc8adacd4dd153e8005ff57a4.hip | // !!! This is a file automatically generated by hipify!!!
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <hip/hip_runtime.h>
#include <cstdio>
#include <time.h>
#include <iostream>
#include <string>
#include <chrono>
#include <vector>
#include <omp.h>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
static std::vector<Layer*> l_input;
static std::vector<Layer*> l_c1;
static std::vector<Layer*> l_c2;
static std::vector<Layer*> l_c3;
static std::vector<Layer*> l_f;
static std::vector<Layer*> l_r;
float* l_c1_weight;
float* l_c2_weight;
float* l_c3_weight;
float* l_f_weight;
float* l_r_weight;
float* l_c1_bias;
float* l_c2_bias;
float* l_c3_bias;
float* l_f_bias;
float* l_r_bias;
int deviceCount = 0;
static void learn();
static int* classify(float input[data_per_node][28][28], int tid);
static void test(int tid);
static double forward_pass(float input[data_per_node][28][28], int tid);
static double back_pass(int tid);
__global__ void weight_update(float* dest, float* d_weight, int N, int deviceCount)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
if(pos < N * (deviceCount-1)){
int idx = pos % N;
atomicAdd(&dest[idx], d_weight[pos]);
}
}
__global__ void bias_update(float* dest, float* bias, int N, int deviceCount)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
if(pos < N * (deviceCount-1)){
int idx = pos % N;
atomicAdd(&dest[idx], bias[pos]);
}
}
__global__ void weight_average(float* weight, int N, int deviceCount)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
if(pos < N){
weight[pos] /= deviceCount;
}
}
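// Multi-GPU scheme used in learn(): every device trains on its own mini-batches; the per-device
// weight gradients and biases are then copied to device 0 with hipMemcpyPeer, summed by
// weight_update / bias_update, divided by the device count in weight_average (w <- (1/D) * sum_d w_d),
// applied on device 0, and finally broadcast back to the other devices.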
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
fprintf(stdout ,"begin\n");
hipError_t err = hipInit(0);
if (err != hipSuccess) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
loaddata();
learn();
fprintf(stdout, "begin test");
test(0);
return 0;
}
// Forward propagation of a single row in dataset
static double forward_pass(float input[data_per_node][28][28], int tid)
{
l_input[tid]->clear();
l_c1[tid]->clear();
l_c2[tid]->clear();
l_c3[tid]->clear();
l_f[tid]->clear();
l_r[tid]->clear();
clock_t start, end;
start = clock();
l_input[tid]->setOutput((float *)input);
hipLaunchKernelGGL(( fp_preact_c1), dim3(2048), dim3(1024), 0, 0, (float (*)[28][28])l_input[tid]->output, (float (*)[6][24][24])l_c1[tid]->preact, (float (*)[5][5])l_c1[tid]->weight);
hipLaunchKernelGGL(( fp_bias_c1), dim3(2048), dim3(1024), 0, 0, (float (*)[6][24][24])l_c1[tid]->preact, l_c1[tid]->bias);
hipLaunchKernelGGL(( apply_sigmoid), dim3(2048), dim3(1024), 0, 0, l_c1[tid]->preact, l_c1[tid]->output, l_c1[tid]->O);
hipLaunchKernelGGL(( fp_preact_r), dim3(2048), dim3(1024), 0, 0, (float (*)[6][24][24])l_c1[tid]->preact, (float (*)[6][6][6])l_r[tid]->preact, (float (*)[4][4])l_r[tid]->weight);
hipLaunchKernelGGL(( fp_bias_r), dim3(2048), dim3(1024), 0, 0, (float (*)[6][6][6])l_r[tid]->preact, l_r[tid]->bias);
hipLaunchKernelGGL(( fp_preact_c2), dim3(2048), dim3(1024), 0, 0, (float (*)[6][24][24])l_c1[tid]->output, (float (*)[6][12][12])l_c2[tid]->preact, (float (*)[2][2])l_c2[tid]->weight);
hipLaunchKernelGGL(( fp_bias_c2), dim3(2048), dim3(1024), 0, 0, (float (*)[6][12][12])l_c2[tid]->preact, l_c2[tid]->bias);
hipLaunchKernelGGL(( apply_sigmoid), dim3(2048), dim3(1024), 0, 0, l_c2[tid]->preact, l_c2[tid]->output, l_c2[tid]->O);
hipLaunchKernelGGL(( fp_preact_c3), dim3(2048), dim3(1024), 0, 0, (float (*)[6][12][12])l_c2[tid]->output, (float (*)[6][6][6])l_c3[tid]->preact, (float (*)[2][2])l_c3[tid]->weight);
hipLaunchKernelGGL(( fp_bias_c3), dim3(2048), dim3(1024), 0, 0, (float (*)[6][6][6])l_c3[tid]->preact, l_c3[tid]->bias);
hipLaunchKernelGGL(( fp_add_res), dim3(2048), dim3(1024), 0, 0, (float (*)[6][6][6])l_c3[tid]->preact, (float (*)[6][6][6])l_r[tid]->preact);
hipLaunchKernelGGL(( apply_sigmoid), dim3(2048), dim3(1024), 0, 0, l_c3[tid]->preact, l_c3[tid]->output, l_c3[tid]->O);
hipLaunchKernelGGL(( fp_preact_f), dim3(2048), dim3(1024), 0, 0, (float (*)[6][6][6])l_c3[tid]->output, (float (*)[10])l_f[tid]->preact, (float (*)[6][6][6])l_f[tid]->weight);
hipLaunchKernelGGL(( fp_bias_f), dim3(2048), dim3(1024), 0, 0, (float (*)[10])l_f[tid]->preact, l_f[tid]->bias);
hipLaunchKernelGGL(( apply_sigmoid), dim3(2048), dim3(1024), 0, 0, l_f[tid]->preact, l_f[tid]->output, l_f[tid]->O);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
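// Forward pass summary: conv1 (5x5) -> sigmoid -> conv2 (2x2) -> sigmoid -> conv3 (2x2), with a
// residual branch (l_r, 4x4) computed from the conv1 pre-activations and added to the conv3
// pre-activations by fp_add_res before the final sigmoid and the fully connected output layer.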
// Back propagation to update weights
static double back_pass(int tid)
{
// fprintf(stdout, "start backward\n");
//fprintf(stdout, "\n here \n");
clock_t start, end;
start = clock();
hipLaunchKernelGGL(( bp_weight_f), dim3(2048), dim3(1024), 0, 0, (float (*)[6][6][6])l_f[tid]->d_weight, (float (*)[10])l_f[tid]->d_preact, (float (*)[6][6][6])l_c3[tid]->output);
hipLaunchKernelGGL(( bp_bias_f), dim3(2048), dim3(1024), 0, 0, l_f[tid]->bias, (float (*)[10])l_f[tid]->d_preact);
hipLaunchKernelGGL(( bp_output_c3), dim3(2048), dim3(1024), 0, 0, (float (*)[6][6][6])l_c3[tid]->d_output, (float (*)[6][6][6])l_f[tid]->weight, (float (*)[10])l_f[tid]->d_preact);
hipLaunchKernelGGL(( bp_preact_c3), dim3(2048), dim3(1024), 0, 0, (float (*)[6][6][6])l_c3[tid]->d_preact, (float (*)[6][6][6])l_c3[tid]->d_output, (float (*)[6][6][6])l_c3[tid]->preact);
hipLaunchKernelGGL(( bp_weight_c3), dim3(2048), dim3(1024), 0, 0, (float (*)[2][2])l_c3[tid]->d_weight, (float (*)[6][6][6])l_c3[tid]->d_preact, (float (*)[6][12][12])l_c2[tid]->output);
hipLaunchKernelGGL(( bp_bias_c3), dim3(2048), dim3(1024), 0, 0, l_c3[tid]->bias, (float (*)[6][6][6])l_c3[tid]->d_preact);
hipLaunchKernelGGL(( bp_output_c2), dim3(2048), dim3(1024), 0, 0, (float (*)[6][12][12])l_c2[tid]->d_output, (float (*)[2][2])l_c3[tid]->weight, (float (*)[6][6][6])l_c3[tid]->d_preact);
hipLaunchKernelGGL(( bp_preact_c2), dim3(2048), dim3(1024), 0, 0, (float (*)[6][12][12])l_c2[tid]->d_preact, (float (*)[6][12][12])l_c2[tid]->d_output, (float (*)[6][12][12])l_c2[tid]->preact);
hipLaunchKernelGGL(( bp_weight_c2), dim3(2048), dim3(1024), 0, 0, (float (*)[2][2])l_c2[tid]->d_weight, (float (*)[6][12][12])l_c2[tid]->d_preact, (float (*)[6][24][24])l_c1[tid]->output);
hipLaunchKernelGGL(( bp_bias_c2), dim3(2048), dim3(1024), 0, 0, l_c2[tid]->bias, (float (*)[6][12][12])l_c2[tid]->d_preact);
hipLaunchKernelGGL(( bp_output_c1), dim3(2048), dim3(1024), 0, 0, (float (*)[6][24][24])l_c1[tid]->d_output, (float (*)[2][2])l_c2[tid]->weight, (float (*)[6][12][12])l_c2[tid]->d_preact);
hipLaunchKernelGGL(( bp_preact_c1), dim3(2048), dim3(1024), 0, 0, (float (*)[6][24][24])l_c1[tid]->d_preact, (float (*)[6][24][24])l_c1[tid]->d_output, (float (*)[6][24][24])l_c1[tid]->preact);
hipLaunchKernelGGL(( bp_weight_c1), dim3(2048), dim3(1024), 0, 0, (float (*)[5][5])l_c1[tid]->d_weight, (float (*)[6][24][24])l_c1[tid]->d_preact, (float (*)[28][28])l_input[tid]->output);
hipLaunchKernelGGL(( bp_bias_c1), dim3(2048), dim3(1024), 0, 0, l_c1[tid]->bias, (float (*)[6][24][24])l_c1[tid]->d_preact);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j) {
int b = 0;
for (int x = i; x < i + 2; ++x)
for (int y = j; y < j+2; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
// train_cnt = 15000;
hipGetDeviceCount(&deviceCount);
omp_set_num_threads(deviceCount);
hipblasHandle_t blas[deviceCount];
l_input = std::vector<Layer*>(deviceCount);
l_c1 = std::vector<Layer*>(deviceCount);
l_c2 = std::vector<Layer*>(deviceCount);
l_c3 = std::vector<Layer*>(deviceCount);
l_f = std::vector<Layer*>(deviceCount);
l_r = std::vector<Layer*>(deviceCount);
float err;
int iter = 20;
double time_taken = 0.0;
fprintf(stdout ,"Learning\n");
// int canAccess = 0;
// for(int i = 1; i < deviceCount; i++){
// hipDeviceCanAccessPeer(&canAccess, 0, i);
// std::cout << canAccess << std::endl;
// hipDeviceCanAccessPeer(&canAccess, i, 0);
// std::cout << canAccess << std::endl;
// }
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
l_input[i] = new Layer(0, 0, 28*28 * data_per_node);
l_c1[i] = new Layer(5*5, 6, 24*24*6 * data_per_node);
l_c2[i] = new Layer(2*2, 6, 12*12*6 * data_per_node);
l_c3[i] = new Layer(2*2, 6, 6*6*6 * data_per_node);
l_f[i] = new Layer(6*6*6, 10, 10 * data_per_node);
l_r[i] = new Layer(4*4,1,6*6*6 * data_per_node);
hipblasCreate(&blas[i]);
if(i == 0){
// Device-side staging buffers for the peer copies in the training loop; they are allocated with
// hipMalloc below (host-side new[] allocations would be redundant here, since these pointers are
// only ever used as device memory).
hipMalloc(&l_c1_weight, sizeof(float) * 5*5*6*(deviceCount-1));
hipMalloc(&l_c2_weight, sizeof(float) * 2*2*6*(deviceCount-1));
// hipMalloc(&l_c3_weight, sizeof(float) * 2*2*6*(deviceCount-1));
hipMalloc(&l_f_weight, sizeof(float) * 6*6*6*10*(deviceCount-1));
// hipMalloc(&l_r_weight, sizeof(float) * 4*4*1*(deviceCount-1));
hipMalloc(&l_c1_bias, sizeof(float) * 6*(deviceCount-1));
hipMalloc(&l_c2_bias, sizeof(float) * 6*(deviceCount-1));
// hipMalloc(&l_c3_bias, sizeof(float) * 6*(deviceCount-1));
hipMalloc(&l_f_bias, sizeof(float) * 10*(deviceCount-1));
// hipMalloc(&l_r_bias, sizeof(float) * 1*(deviceCount-1));
}
}
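// Each OpenMP thread owns one GPU and holds a full replica of the network (l_input .. l_r);
// device 0 additionally owns the staging buffers allocated above, which receive the other
// devices' gradients and biases during the synchronization step inside the training loop.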
hipDeviceSynchronize();
auto start_time = std::chrono::steady_clock::now();
while (iter < 0 || iter-- > 0) {
#pragma omp parallel num_threads(deviceCount)
{
err = 0.0f;
int tid = omp_get_thread_num();
hipSetDevice(tid);
unsigned int* Y;
hipMalloc(&Y, sizeof(unsigned int) * data_per_node);
int batch_cnt = train_cnt / data_per_node;
for (int q = 0; q < batch_cnt; q+=deviceCount) {
float tmp_err;
int p = q + tid;
float input[data_per_node][28][28];
unsigned int Y_host[data_per_node] = {0};
for(int k = 0; k < data_per_node; k++){
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[k][i][j] = (train_set[p * data_per_node + k].data)[i][j];
}
}
Y_host[k] = train_set[p * data_per_node + k].label;
}
time_taken += forward_pass(input, tid);
l_f[tid]->bp_clear();
l_c2[tid]->bp_clear();
l_c1[tid]->bp_clear();
l_c3[tid]->bp_clear();
// hipMemset(Y, 0, sizeof(unsigned int) * data_per_node);
hipMemcpy(Y, Y_host, sizeof(unsigned int) * data_per_node, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( makeError), dim3(data_per_node), dim3(10), 0, 0, l_f[tid]->d_preact, l_f[tid]->output, Y, 10 * data_per_node);
hipblasSnrm2(blas[tid], 10 * data_per_node, l_f[tid]->d_preact, 1, &tmp_err);
err += tmp_err;
time_taken += back_pass(tid);
// fprintf(stdout, "device %d, finish iter %d \n", tid, p);
#pragma omp barrier
if(deviceCount == 1){
hipLaunchKernelGGL(( apply_grad), dim3(2048), dim3(1024), 0, 0, l_f[tid]->weight, l_f[tid]->d_weight, l_f[tid]->M * l_f[tid]->N);
hipLaunchKernelGGL(( apply_grad), dim3(2048), dim3(1024), 0, 0, l_c2[tid]->weight, l_c2[tid]->d_weight, l_c2[tid]->M * l_c2[tid]->N);
hipLaunchKernelGGL(( apply_grad), dim3(2048), dim3(1024), 0, 0, l_c1[tid]->weight, l_c1[tid]->d_weight, l_c1[tid]->M * l_c1[tid]->N);
}
else{
int tid = omp_get_thread_num();
hipSetDevice(tid);
if(tid != 0){
hipMemcpyPeer(&l_c1_weight[(tid-1) * l_c1[tid]->M * l_c1[tid]->N], 0, l_c1[tid]->d_weight, tid, sizeof(float) * l_c1[tid]->M * l_c1[tid]->N);
hipMemcpyPeer(&l_c2_weight[(tid-1) * l_c2[tid]->M * l_c2[tid]->N], 0, l_c2[tid]->d_weight, tid, sizeof(float) * l_c2[tid]->M * l_c2[tid]->N);
hipMemcpyPeer(&l_f_weight[(tid-1) * l_f[tid]->M * l_f[tid]->N], 0, l_f[tid]->d_weight, tid, sizeof(float) * l_f[tid]->M * l_f[tid]->N);
hipMemcpyPeer(&l_c1_bias[(tid-1) * l_c1[tid]->N], 0, l_c1[tid]->bias, tid, sizeof(float) * l_c1[tid]->N);
hipMemcpyPeer(&l_c2_bias[(tid-1) * l_c2[tid]->N], 0, l_c2[tid]->bias, tid, sizeof(float) * l_c2[tid]->N);
hipMemcpyPeer(&l_f_bias[(tid-1) * l_f[tid]->N], 0, l_f[tid]->bias, tid, sizeof(float) * l_f[tid]->N);
}
#pragma omp barrier
if(tid == 0){
hipLaunchKernelGGL(( weight_update), dim3(2048), dim3(1024), 0, 0, l_c1[tid]->d_weight, l_c1_weight, l_c1[tid]->M * l_c1[tid]->N, deviceCount);
hipLaunchKernelGGL(( weight_update), dim3(2048), dim3(1024), 0, 0, l_c2[tid]->d_weight, l_c2_weight, l_c2[tid]->M * l_c2[tid]->N, deviceCount);
hipLaunchKernelGGL(( weight_update), dim3(2048), dim3(1024), 0, 0, l_f[tid]->d_weight, l_f_weight, l_f[tid]->M * l_f[tid]->N, deviceCount);
hipLaunchKernelGGL(( weight_average), dim3(2048), dim3(1024), 0, 0, l_c1[tid]->d_weight, l_c1[tid]->M * l_c1[tid]->N, deviceCount);
hipLaunchKernelGGL(( weight_average), dim3(2048), dim3(1024), 0, 0, l_c2[tid]->d_weight, l_c2[tid]->M * l_c2[tid]->N, deviceCount);
hipLaunchKernelGGL(( weight_average), dim3(2048), dim3(1024), 0, 0, l_f[tid]->d_weight, l_f[tid]->M * l_f[tid]->N, deviceCount);
hipLaunchKernelGGL(( apply_grad), dim3(2048), dim3(1024), 0, 0, l_f[tid]->weight, l_f[tid]->d_weight, l_f[tid]->M * l_f[tid]->N);
hipLaunchKernelGGL(( apply_grad), dim3(2048), dim3(1024), 0, 0, l_c2[tid]->weight, l_c2[tid]->d_weight, l_c2[tid]->M * l_c2[tid]->N);
hipLaunchKernelGGL(( apply_grad), dim3(2048), dim3(1024), 0, 0, l_c1[tid]->weight, l_c1[tid]->d_weight, l_c1[tid]->M * l_c1[tid]->N);
hipLaunchKernelGGL(( bias_update), dim3(2048), dim3(1024), 0, 0, l_c1[tid]->bias, &l_c1_bias[tid * l_c1[tid]->N], l_c1[tid]->N, deviceCount);
hipLaunchKernelGGL(( bias_update), dim3(2048), dim3(1024), 0, 0, l_c2[tid]->bias, &l_c2_bias[tid * l_c2[tid]->N], l_c2[tid]->N, deviceCount);
hipLaunchKernelGGL(( bias_update), dim3(2048), dim3(1024), 0, 0, l_f[tid]->bias, &l_f_bias[tid * l_f[tid]->N], l_f[tid]->N, deviceCount);
hipLaunchKernelGGL(( weight_average), dim3(2048), dim3(1024), 0, 0, l_c1[tid]->bias, l_c1[tid]->N, deviceCount);
hipLaunchKernelGGL(( weight_average), dim3(2048), dim3(1024), 0, 0, l_c2[tid]->bias, l_c2[tid]->N, deviceCount);
hipLaunchKernelGGL(( weight_average), dim3(2048), dim3(1024), 0, 0, l_f[tid]->bias, l_f[tid]->N, deviceCount);
for(int j = 1; j < deviceCount; j++){
hipMemcpyPeer(l_c1[j]->weight, j, l_c1[tid]->weight, tid, sizeof(float) * l_c1[tid]->M * l_c1[tid]->N);
hipMemcpyPeer(l_c2[j]->weight, j, l_c2[tid]->weight, tid, sizeof(float) * l_c2[tid]->M * l_c2[tid]->N);
hipMemcpyPeer(l_f[j]->weight, j, l_f[tid]->weight, tid, sizeof(float) * l_f[tid]->M * l_f[tid]->N);
hipMemcpyPeer(l_c1[j]->bias, j, l_c1[tid]->bias, tid, sizeof(float) * l_c1[tid]->N);
hipMemcpyPeer(l_c2[j]->bias, j, l_c2[tid]->bias, tid, sizeof(float) * l_c2[tid]->N);
hipMemcpyPeer(l_f[j]->bias, j, l_f[tid]->bias, tid, sizeof(float) * l_f[tid]->N);
}
}
}
}
hipFree(Y);   // release the device-side label buffer allocated for this parallel region
}
fprintf(stdout, "\n finish iter %d \n", iter);
err /= train_cnt;
double accuracy = 100 - double(err) * 100.0;
fprintf(stdout, "accuracy: %.2lf%% , time_on_gpu: %lf sec\n", accuracy, time_taken);
if (err < threshold) {
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = end_time - start_time;
double seconds = diff.count();
fprintf(stdout, "\n Time - %lf s\n", seconds);
}
// Returns predicted labels (0-9) for a batch of data_per_node images; the caller frees the returned array
static int* classify(float input[data_per_node][28][28], int tid)
{
float res[data_per_node * 10];
forward_pass(input, tid);
int* max = new int[data_per_node]{0};
hipMemcpy(&res[0], l_f[tid]->output, sizeof(float) * 10 * data_per_node, hipMemcpyDeviceToHost);
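// Pick the argmax over the 10 class scores of each sample in the batch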
for(int j = 0; j < data_per_node; j++){
for (int i = 0; i < 10; i++) {
if (res[10 * j + max[j]] < res[10 * j + i]) {
max[j] = i;
}
}
}
return max;
}
// Perform forward propagation of test data
static void test(int tid)
{
hipSetDevice(tid);
int error = 0;
int batch_cnt = test_cnt / data_per_node;
for(int p = 0; p < batch_cnt; ++p){
float input[data_per_node][28][28];
for(int k = 0; k < data_per_node; ++k){
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[k][i][j] = (test_set[data_per_node * p + k].data)[i][j];
}
}
}
int* max = classify(input, tid);
for (int i = 0; i < data_per_node; ++i) {
if (max[i] != test_set[data_per_node * p + i].label) {
++error;
}
}
delete[] max;   // release the per-batch predictions allocated in classify()
}
double err_percent = double(error) / double(test_cnt) * 100.0;
fprintf(stdout, "Error Rate: %.2lf%% , accuracy: %.2lf%%\n",err_percent,100-err_percent);
} | deafa5d16c7454ffc8adacd4dd153e8005ff57a4.cu | #define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <cuda.h>
#include <cstdio>
#include <time.h>
#include <iostream>
#include <string>
#include <chrono>
#include <vector>
#include <omp.h>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
static std::vector<Layer*> l_input;
static std::vector<Layer*> l_c1;
static std::vector<Layer*> l_c2;
static std::vector<Layer*> l_c3;
static std::vector<Layer*> l_f;
static std::vector<Layer*> l_r;
float* l_c1_weight;
float* l_c2_weight;
float* l_c3_weight;
float* l_f_weight;
float* l_r_weight;
float* l_c1_bias;
float* l_c2_bias;
float* l_c3_bias;
float* l_f_bias;
float* l_r_bias;
int deviceCount = 0;
static void learn();
static int* classify(float input[data_per_node][28][28], int tid);
static void test(int tid);
static double forward_pass(float input[data_per_node][28][28], int tid);
static double back_pass(int tid);
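// Multi-GPU parameter-averaging helpers: weight_update / bias_update accumulate the
// gradient and bias buffers copied over from peer devices (indices 1..deviceCount-1)
// into device 0's own buffers with one atomicAdd per element, and weight_average then
// divides the accumulated values by the number of devices.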
__global__ void weight_update(float* dest, float* d_weight, int N, int deviceCount)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
if(pos < N * (deviceCount-1)){
int idx = pos % N;
atomicAdd(&dest[idx], d_weight[pos]);
}
}
__global__ void bias_update(float* dest, float* bias, int N, int deviceCount)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
if(pos < N * (deviceCount-1)){
int idx = pos % N;
atomicAdd(&dest[idx], bias[pos]);
}
}
__global__ void weight_average(float* weight, int N, int deviceCount)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
if(pos < N){
weight[pos] /= deviceCount;
}
}
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
fprintf(stdout, "begin\n");
CUresult err = cuInit(0);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
loaddata();
learn();
fprintf(stdout, "begin test");
test(0);
return 0;
}
// Forward propagation of one mini-batch (data_per_node images) through the network
static double forward_pass(float input[data_per_node][28][28], int tid)
{
l_input[tid]->clear();
l_c1[tid]->clear();
l_c2[tid]->clear();
l_c3[tid]->clear();
l_f[tid]->clear();
l_r[tid]->clear();
clock_t start, end;
start = clock();
l_input[tid]->setOutput((float *)input);
fp_preact_c1<<<2048, 1024>>>((float (*)[28][28])l_input[tid]->output, (float (*)[6][24][24])l_c1[tid]->preact, (float (*)[5][5])l_c1[tid]->weight);
fp_bias_c1<<<2048, 1024>>>((float (*)[6][24][24])l_c1[tid]->preact, l_c1[tid]->bias);
apply_sigmoid<<<2048, 1024>>>(l_c1[tid]->preact, l_c1[tid]->output, l_c1[tid]->O);
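// Residual-style shortcut: l_r projects the first conv's pre-activations (6x24x24) down to
// the 6x6x6 shape of c3 with a 4x4 kernel; fp_add_res below adds it to c3's pre-activations
// before the sigmoid.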
fp_preact_r<<<2048, 1024>>>((float (*)[6][24][24])l_c1[tid]->preact, (float (*)[6][6][6])l_r[tid]->preact, (float (*)[4][4])l_r[tid]->weight);
fp_bias_r<<<2048, 1024>>>((float (*)[6][6][6])l_r[tid]->preact, l_r[tid]->bias);
fp_preact_c2<<<2048, 1024>>>((float (*)[6][24][24])l_c1[tid]->output, (float (*)[6][12][12])l_c2[tid]->preact, (float (*)[2][2])l_c2[tid]->weight);
fp_bias_c2<<<2048, 1024>>>((float (*)[6][12][12])l_c2[tid]->preact, l_c2[tid]->bias);
apply_sigmoid<<<2048, 1024>>>(l_c2[tid]->preact, l_c2[tid]->output, l_c2[tid]->O);
fp_preact_c3<<<2048, 1024>>>((float (*)[6][12][12])l_c2[tid]->output, (float (*)[6][6][6])l_c3[tid]->preact, (float (*)[2][2])l_c3[tid]->weight);
fp_bias_c3<<<2048, 1024>>>((float (*)[6][6][6])l_c3[tid]->preact, l_c3[tid]->bias);
fp_add_res<<<2048, 1024>>>((float (*)[6][6][6])l_c3[tid]->preact, (float (*)[6][6][6])l_r[tid]->preact);
apply_sigmoid<<<2048, 1024>>>(l_c3[tid]->preact, l_c3[tid]->output, l_c3[tid]->O);
fp_preact_f<<<2048, 1024>>>((float (*)[6][6][6])l_c3[tid]->output, (float (*)[10])l_f[tid]->preact, (float (*)[6][6][6])l_f[tid]->weight);
fp_bias_f<<<2048, 1024>>>((float (*)[10])l_f[tid]->preact, l_f[tid]->bias);
apply_sigmoid<<<2048, 1024>>>(l_f[tid]->preact, l_f[tid]->output, l_f[tid]->O);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Back propagation to update weights
static double back_pass(int tid)
{
// fprintf(stdout, "start backward\n");
//fprintf(stdout, "\n here \n");
clock_t start, end;
start = clock();
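// Backward pass in reverse layer order: fully-connected -> c3 -> c2 -> c1. Each stage
// computes its weight gradient and bias update plus the d_output/d_preact handed to the
// previous layer; the residual layer l_r receives no gradient update here.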
bp_weight_f<<<2048, 1024>>>((float (*)[6][6][6])l_f[tid]->d_weight, (float (*)[10])l_f[tid]->d_preact, (float (*)[6][6][6])l_c3[tid]->output);
bp_bias_f<<<2048, 1024>>>(l_f[tid]->bias, (float (*)[10])l_f[tid]->d_preact);
bp_output_c3<<<2048, 1024>>>((float (*)[6][6][6])l_c3[tid]->d_output, (float (*)[6][6][6])l_f[tid]->weight, (float (*)[10])l_f[tid]->d_preact);
bp_preact_c3<<<2048, 1024>>>((float (*)[6][6][6])l_c3[tid]->d_preact, (float (*)[6][6][6])l_c3[tid]->d_output, (float (*)[6][6][6])l_c3[tid]->preact);
bp_weight_c3<<<2048, 1024>>>((float (*)[2][2])l_c3[tid]->d_weight, (float (*)[6][6][6])l_c3[tid]->d_preact, (float (*)[6][12][12])l_c2[tid]->output);
bp_bias_c3<<<2048, 1024>>>(l_c3[tid]->bias, (float (*)[6][6][6])l_c3[tid]->d_preact);
bp_output_c2<<<2048, 1024>>>((float (*)[6][12][12])l_c2[tid]->d_output, (float (*)[2][2])l_c3[tid]->weight, (float (*)[6][6][6])l_c3[tid]->d_preact);
bp_preact_c2<<<2048, 1024>>>((float (*)[6][12][12])l_c2[tid]->d_preact, (float (*)[6][12][12])l_c2[tid]->d_output, (float (*)[6][12][12])l_c2[tid]->preact);
bp_weight_c2<<<2048, 1024>>>((float (*)[2][2])l_c2[tid]->d_weight, (float (*)[6][12][12])l_c2[tid]->d_preact, (float (*)[6][24][24])l_c1[tid]->output);
bp_bias_c2<<<2048, 1024>>>(l_c2[tid]->bias, (float (*)[6][12][12])l_c2[tid]->d_preact);
bp_output_c1<<<2048, 1024>>>((float (*)[6][24][24])l_c1[tid]->d_output, (float (*)[2][2])l_c2[tid]->weight, (float (*)[6][12][12])l_c2[tid]->d_preact);
bp_preact_c1<<<2048, 1024>>>((float (*)[6][24][24])l_c1[tid]->d_preact, (float (*)[6][24][24])l_c1[tid]->d_output, (float (*)[6][24][24])l_c1[tid]->preact);
bp_weight_c1<<<2048, 1024>>>((float (*)[5][5])l_c1[tid]->d_weight, (float (*)[6][24][24])l_c1[tid]->d_preact, (float (*)[28][28])l_input[tid]->output);
bp_bias_c1<<<2048, 1024>>>(l_c1[tid]->bias, (float (*)[6][24][24])l_c1[tid]->d_preact);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 24; ++i)
for (int j = 0; j < 24; ++j) {
int b = 0;
for (int x = i; x < i + 5; ++x)
for (int y = j; y < j + 5; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
// train_cnt = 15000;
cudaGetDeviceCount(&deviceCount);
omp_set_num_threads(deviceCount);
cublasHandle_t blas[deviceCount];
l_input = std::vector<Layer*>(deviceCount);
l_c1 = std::vector<Layer*>(deviceCount);
l_c2 = std::vector<Layer*>(deviceCount);
l_c3 = std::vector<Layer*>(deviceCount);
l_f = std::vector<Layer*>(deviceCount);
l_r = std::vector<Layer*>(deviceCount);
float err;
int iter = 20;
double time_taken = 0.0;
fprintf(stdout, "Learning\n");
// int canAccess = 0;
// for(int i = 1; i < deviceCount; i++){
// cudaDeviceCanAccessPeer(&canAccess, 0, i);
// std::cout << canAccess << std::endl;
// cudaDeviceCanAccessPeer(&canAccess, i, 0);
// std::cout << canAccess << std::endl;
// }
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
l_input[i] = new Layer(0, 0, 28*28 * data_per_node);
l_c1[i] = new Layer(5*5, 6, 24*24*6 * data_per_node);
l_c2[i] = new Layer(2*2, 6, 12*12*6 * data_per_node);
l_c3[i] = new Layer(2*2, 6, 6*6*6 * data_per_node);
l_f[i] = new Layer(6*6*6, 10, 10 * data_per_node);
l_r[i] = new Layer(4*4,1,6*6*6 * data_per_node);
cublasCreate(&blas[i]);
if(i == 0){
// Staging buffers on device 0 for the gradients and biases gathered from peer GPUs
cudaMalloc(&l_c1_weight, sizeof(float) * 5*5*6*(deviceCount-1));
cudaMalloc(&l_c2_weight, sizeof(float) * 2*2*6*(deviceCount-1));
// cudaMalloc(&l_c3_weight, sizeof(float) * 2*2*6*(deviceCount-1));
cudaMalloc(&l_f_weight, sizeof(float) * 6*6*6*10*(deviceCount-1));
// cudaMalloc(&l_r_weight, sizeof(float) * 4*4*1*(deviceCount-1));
cudaMalloc(&l_c1_bias, sizeof(float) * 6*(deviceCount-1));
cudaMalloc(&l_c2_bias, sizeof(float) * 6*(deviceCount-1));
// cudaMalloc(&l_c3_bias, sizeof(float) * 6*(deviceCount-1));
cudaMalloc(&l_f_bias, sizeof(float) * 10*(deviceCount-1));
// cudaMalloc(&l_r_bias, sizeof(float) * 1*(deviceCount-1));
}
}
cudaDeviceSynchronize();
auto start_time = std::chrono::steady_clock::now();
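// Training loop: each OpenMP thread drives one GPU on its own mini-batches; after every
// batch the per-device gradients are combined (averaged when more than one device is
// present) before being applied.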
while (iter < 0 || iter-- > 0) {
#pragma omp parallel num_threads(deviceCount)
{
err = 0.0f;
int tid = omp_get_thread_num();
cudaSetDevice(tid);
unsigned int* Y;
cudaMalloc(&Y, sizeof(unsigned int) * data_per_node);
int batch_cnt = train_cnt / data_per_node;
for (int q = 0; q < batch_cnt; q+=deviceCount) {
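// Batches are interleaved across devices: device tid processes batch index q + tid.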
float tmp_err;
int p = q + tid;
float input[data_per_node][28][28];
unsigned int Y_host[data_per_node] = {0};
for(int k = 0; k < data_per_node; k++){
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[k][i][j] = (train_set[p * data_per_node + k].data)[i][j];
}
}
Y_host[k] = train_set[p * data_per_node + k].label;
}
time_taken += forward_pass(input, tid);
l_f[tid]->bp_clear();
l_c2[tid]->bp_clear();
l_c1[tid]->bp_clear();
l_c3[tid]->bp_clear();
// cudaMemset(Y, 0, sizeof(unsigned int) * data_per_node);
cudaMemcpy(Y, Y_host, sizeof(unsigned int) * data_per_node, cudaMemcpyHostToDevice);
makeError<<<data_per_node, 10>>>(l_f[tid]->d_preact, l_f[tid]->output, Y, 10 * data_per_node);
cublasSnrm2(blas[tid], 10 * data_per_node, l_f[tid]->d_preact, 1, &tmp_err);
err += tmp_err;
time_taken += back_pass(tid);
// fprintf(stdout, "device %d, finish iter %d \n", tid, p);
#pragma omp barrier
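// Gradient synchronization: with one GPU the accumulated gradients are applied directly;
// with several GPUs every non-root device copies its gradients and biases into device 0's
// staging buffers, device 0 averages and applies them, then broadcasts the result back.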
if(deviceCount == 1){
apply_grad<<<2048, 1024>>>(l_f[tid]->weight, l_f[tid]->d_weight, l_f[tid]->M * l_f[tid]->N);
apply_grad<<<2048, 1024>>>(l_c2[tid]->weight, l_c2[tid]->d_weight, l_c2[tid]->M * l_c2[tid]->N);
apply_grad<<<2048, 1024>>>(l_c1[tid]->weight, l_c1[tid]->d_weight, l_c1[tid]->M * l_c1[tid]->N);
}
else{
int tid = omp_get_thread_num();
cudaSetDevice(tid);
if(tid != 0){
cudaMemcpyPeer(&l_c1_weight[(tid-1) * l_c1[tid]->M * l_c1[tid]->N], 0, l_c1[tid]->d_weight, tid, sizeof(float) * l_c1[tid]->M * l_c1[tid]->N);
cudaMemcpyPeer(&l_c2_weight[(tid-1) * l_c2[tid]->M * l_c2[tid]->N], 0, l_c2[tid]->d_weight, tid, sizeof(float) * l_c2[tid]->M * l_c2[tid]->N);
cudaMemcpyPeer(&l_f_weight[(tid-1) * l_f[tid]->M * l_f[tid]->N], 0, l_f[tid]->d_weight, tid, sizeof(float) * l_f[tid]->M * l_f[tid]->N);
cudaMemcpyPeer(&l_c1_bias[(tid-1) * l_c1[tid]->N], 0, l_c1[tid]->bias, tid, sizeof(float) * l_c1[tid]->N);
cudaMemcpyPeer(&l_c2_bias[(tid-1) * l_c2[tid]->N], 0, l_c2[tid]->bias, tid, sizeof(float) * l_c2[tid]->N);
cudaMemcpyPeer(&l_f_bias[(tid-1) * l_f[tid]->N], 0, l_f[tid]->bias, tid, sizeof(float) * l_f[tid]->N);
}
#pragma omp barrier
if(tid == 0){
weight_update<<<2048, 1024>>>(l_c1[tid]->d_weight, l_c1_weight, l_c1[tid]->M * l_c1[tid]->N, deviceCount);
weight_update<<<2048, 1024>>>(l_c2[tid]->d_weight, l_c2_weight, l_c2[tid]->M * l_c2[tid]->N, deviceCount);
weight_update<<<2048, 1024>>>(l_f[tid]->d_weight, l_f_weight, l_f[tid]->M * l_f[tid]->N, deviceCount);
weight_average<<<2048, 1024>>>(l_c1[tid]->d_weight, l_c1[tid]->M * l_c1[tid]->N, deviceCount);
weight_average<<<2048, 1024>>>(l_c2[tid]->d_weight, l_c2[tid]->M * l_c2[tid]->N, deviceCount);
weight_average<<<2048, 1024>>>(l_f[tid]->d_weight, l_f[tid]->M * l_f[tid]->N, deviceCount);
apply_grad<<<2048, 1024>>>(l_f[tid]->weight, l_f[tid]->d_weight, l_f[tid]->M * l_f[tid]->N);
apply_grad<<<2048, 1024>>>(l_c2[tid]->weight, l_c2[tid]->d_weight, l_c2[tid]->M * l_c2[tid]->N);
apply_grad<<<2048, 1024>>>(l_c1[tid]->weight, l_c1[tid]->d_weight, l_c1[tid]->M * l_c1[tid]->N);
bias_update<<<2048, 1024>>>(l_c1[tid]->bias, &l_c1_bias[tid * l_c1[tid]->N], l_c1[tid]->N, deviceCount);
bias_update<<<2048, 1024>>>(l_c2[tid]->bias, &l_c2_bias[tid * l_c2[tid]->N], l_c2[tid]->N, deviceCount);
bias_update<<<2048, 1024>>>(l_f[tid]->bias, &l_f_bias[tid * l_f[tid]->N], l_f[tid]->N, deviceCount);
weight_average<<<2048, 1024>>>(l_c1[tid]->bias, l_c1[tid]->N, deviceCount);
weight_average<<<2048, 1024>>>(l_c2[tid]->bias, l_c2[tid]->N, deviceCount);
weight_average<<<2048, 1024>>>(l_f[tid]->bias, l_f[tid]->N, deviceCount);
for(int j = 1; j < deviceCount; j++){
cudaMemcpyPeer(l_c1[j]->weight, j, l_c1[tid]->weight, tid, sizeof(float) * l_c1[tid]->M * l_c1[tid]->N);
cudaMemcpyPeer(l_c2[j]->weight, j, l_c2[tid]->weight, tid, sizeof(float) * l_c2[tid]->M * l_c2[tid]->N);
cudaMemcpyPeer(l_f[j]->weight, j, l_f[tid]->weight, tid, sizeof(float) * l_f[tid]->M * l_f[tid]->N);
cudaMemcpyPeer(l_c1[j]->bias, j, l_c1[tid]->bias, tid, sizeof(float) * l_c1[tid]->N);
cudaMemcpyPeer(l_c2[j]->bias, j, l_c2[tid]->bias, tid, sizeof(float) * l_c2[tid]->N);
cudaMemcpyPeer(l_f[j]->bias, j, l_f[tid]->bias, tid, sizeof(float) * l_f[tid]->N);
}
}
}
}
cudaFree(Y);   // release the device-side label buffer allocated for this parallel region
}
fprintf(stdout, "\n finish iter %d \n", iter);
err /= train_cnt;
double accuracy = 100 - double(err) * 100.0;
fprintf(stdout, "accuracy: %.2lf%% , time_on_gpu: %lf sec\n", accuracy, time_taken);
if (err < threshold) {
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = end_time - start_time;
double seconds = diff.count();
fprintf(stdout, "\n Time - %lf s\n", seconds);
}
// Returns predicted labels (0-9) for a batch of data_per_node images; the caller frees the returned array
static int* classify(float input[data_per_node][28][28], int tid)
{
float res[data_per_node * 10];
forward_pass(input, tid);
int* max = new int[data_per_node]{0};
cudaMemcpy(&res[0], l_f[tid]->output, sizeof(float) * 10 * data_per_node, cudaMemcpyDeviceToHost);
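// Pick the argmax over the 10 class scores of each sample in the batch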
for(int j = 0; j < data_per_node; j++){
for (int i = 0; i < 10; i++) {
if (res[10 * j + max[j]] < res[10 * j + i]) {
max[j] = i;
}
}
}
return max;
}
// Perform forward propagation of test data
static void test(int tid)
{
cudaSetDevice(tid);
int error = 0;
int batch_cnt = test_cnt / data_per_node;
for(int p = 0; p < batch_cnt; ++p){
float input[data_per_node][28][28];
for(int k = 0; k < data_per_node; ++k){
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[k][i][j] = (test_set[data_per_node * p + k].data)[i][j];
}
}
}
int* max = classify(input, tid);
for (int i = 0; i < data_per_node; ++i) {
if (max[i] != test_set[data_per_node * p + i].label) {
++error;
}
}
delete[] max;   // release the per-batch predictions allocated in classify()
}
double err_percent = double(error) / double(test_cnt) * 100.0;
fprintf(stdout, "Error Rate: %.2lf%% , accuracy: %.2lf%%\n",err_percent,100-err_percent);
} |
775d940d718521687fabd321fc5451a43004c458.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
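// All kernels in this file are guarded by CUASR_BENCH_LEVEL: level 0 keeps only a small baseline
// set, level 1 adds the more common tilings, and level 2 compiles the exhaustive sweep (such as
// the 32x32x8_16x8x1_2x2_8x4_2x4 configuration above). One illustrative way to enable the full
// sweep is to define the macro on the compiler command line, e.g. -DCUASR_BENCH_LEVEL=2; the
// exact invocation depends on the project's build setup.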
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
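// Every configuration compiled at the chosen CUASR_BENCH_LEVEL registers through BENCHMARK(...)
// with a RangeMultiplier(2) sweep from 256 to 4096, so a single run executes all of them. To time
// just one tiling, Google Benchmark's regex filter can be used; the binary name below is
// illustrative only:
//   ./cuasr_bench --benchmark_filter='maximum_plus_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2$'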
| 775d940d718521687fabd321fc5451a43004c458.cu | /***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
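// The nt_t infix in these benchmark names lines up with the layout arguments passed to
// cuasr::gemm::device::Srgemm throughout this file: A is ColumnMajor (n), B is RowMajor (t), and
// the C/D output is RowMajor (the trailing t). This reading is inferred from the template
// arguments themselves; the generator's own naming rules may use different terminology.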
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::maximum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_maximum_plus_dsrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
|
675c76b931c9f8f931d2d29d8f7a8861fc756118.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
extern "C" {
#include "bmp.h"
}
__global__ void render(char *img, int width, int height) {
unsigned int x_dim = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y_dim = blockIdx.y*blockDim.y + threadIdx.y;
int index = 3*width*y_dim + x_dim*3;
float x_origin = ((float) x_dim/width)*3.25 - 2;
float y_origin = ((float) y_dim/width)*2.5 - 1.25;
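  // Map the pixel to the complex plane: x spans roughly [-2, 1.25] and y spans
  // [-1.25, 1.25]. Both axes are scaled by `width`, so the mapping is only
  // undistorted for square images such as the 1024x1024 one rendered from main().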
float x = 0.0;
float y = 0.0;
int iteration = 0;
int max_iteration = 256;
while(x*x + y*y <= 4 && iteration < max_iteration) {
float xtemp = x*x - y*y + x_origin;
y = 2*x*y + y_origin;
x = xtemp;
iteration++;
}
if(iteration == max_iteration) {
img[index] = 0;
img[index + 1] = 0;
img[index + 2] = 0;
} else {
img[index] = iteration;
img[index + 1] = iteration;
img[index + 2] = iteration;
}
}
void runCUDA(int width, int height)
{
size_t buffer_size = sizeof(char) * width * height * 3;
char *image;
hipMalloc((void **) &image, buffer_size);
char *host_image = (char *) malloc(buffer_size);
dim3 blockDim(16, 16, 1);
dim3 gridDim(width / blockDim.x, height / blockDim.y, 1);
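  // The integer division assumes width and height are multiples of 16 (true for
  // the 1024x1024 call in main); otherwise the rightmost/bottom partial tiles
  // would simply never be rendered.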
hipLaunchKernelGGL(( render), dim3(gridDim), dim3(blockDim), 0 , 0, image, width, height);
hipMemcpy(host_image, image, buffer_size, hipMemcpyDeviceToHost);
write_bmp("output.bmp", width, height, host_image);
hipFree(image);
free(host_image);
}
int main(int argc, const char * argv[]) {
runCUDA(1024, 1024);
return 0;
}
| 675c76b931c9f8f931d2d29d8f7a8861fc756118.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
extern "C" {
#include "bmp.h"
}
__global__ void render(char *img, int width, int height) {
unsigned int x_dim = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y_dim = blockIdx.y*blockDim.y + threadIdx.y;
int index = 3*width*y_dim + x_dim*3;
float x_origin = ((float) x_dim/width)*3.25 - 2;
float y_origin = ((float) y_dim/width)*2.5 - 1.25;
float x = 0.0;
float y = 0.0;
int iteration = 0;
int max_iteration = 256;
while(x*x + y*y <= 4 && iteration < max_iteration) {
float xtemp = x*x - y*y + x_origin;
y = 2*x*y + y_origin;
x = xtemp;
iteration++;
}
if(iteration == max_iteration) {
img[index] = 0;
img[index + 1] = 0;
img[index + 2] = 0;
} else {
img[index] = iteration;
img[index + 1] = iteration;
img[index + 2] = iteration;
}
}
void runCUDA(int width, int height)
{
size_t buffer_size = sizeof(char) * width * height * 3;
char *image;
cudaMalloc((void **) &image, buffer_size);
char *host_image = (char *) malloc(buffer_size);
dim3 blockDim(16, 16, 1);
dim3 gridDim(width / blockDim.x, height / blockDim.y, 1);
render<<< gridDim, blockDim, 0 >>>(image, width, height);
cudaMemcpy(host_image, image, buffer_size, cudaMemcpyDeviceToHost);
write_bmp("output.bmp", width, height, host_image);
cudaFree(image);
free(host_image);
}
int main(int argc, const char * argv[]) {
runCUDA(1024, 1024);
return 0;
}
|
ef3cade48f6bfc32c14355b30aa27174fa4f7585.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <iostream>
#include <time.h>
#define TILE_SIZE 4
#define WINDOW_SIZE (3)
template<class IMG_TYPE>
__global__ void kernelMedian( const IMG_TYPE * __restrict__ in, IMG_TYPE *output, int j_dim, int pitch)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned char filterVector[9] = {0,0,0,0,0,0,0,0,0};
if((row==0) || (col==0) || (row==pitch-1) || (col==j_dim-1))
    output[row*j_dim+col] = 0; //Deal with boundary conditions
else {
for (int x = 0; x < WINDOW_SIZE; x++) {
for (int y = 0; y < WINDOW_SIZE; y++){
        filterVector[x*WINDOW_SIZE+y] = in[(row+x-1)*j_dim+(col+y-1)]; // set up the filtering window.
}
}
for (int i = 0; i < 9; i++) {
for (int j = i + 1; j < 9; j++) {
if (filterVector[i] > filterVector[j]) {
//Swap the variables.
char tmp = filterVector[i];
filterVector[i] = filterVector[j];
filterVector[j] = tmp;
}
}
}
output[row*j_dim+col] = filterVector[4]; //Set the output variables.
}
}
template<class IMG_TYPE>
__global__ void medianFilterSharedKernel(const IMG_TYPE * __restrict__ inputImageKernel, IMG_TYPE *outputImagekernel, int imageWidth, int imageHeight)
{
//Set the row and col value for each thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ unsigned char sharedmem[(TILE_SIZE+2)] [(TILE_SIZE+2)]; //initialize shared memory
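    // The tile carries a one-pixel halo on every side (hence TILE_SIZE+2 per
    // dimension) so each thread's full 3x3 window is resident in shared memory;
    // thread (tx, ty) stores its own pixel at [tx+1][ty+1].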
    //Flag the threads that sit on the border of the tile.
bool is_x_left = (threadIdx.x == 0), is_x_right = (threadIdx.x == TILE_SIZE-1);
bool is_y_top = (threadIdx.y == 0), is_y_bottom = (threadIdx.y == TILE_SIZE-1);
//Initialize with zero
if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y+1] = 0;
else if(is_x_right)
sharedmem[threadIdx.x + 2][threadIdx.y+1]=0;
if (is_y_top){
sharedmem[threadIdx.x+1][threadIdx.y] = 0;
if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y] = 0;
else if(is_x_right)
sharedmem[threadIdx.x+2][threadIdx.y] = 0;
}
else if (is_y_bottom){
sharedmem[threadIdx.x+1][threadIdx.y+2] = 0;
if(is_x_right)
sharedmem[threadIdx.x+2][threadIdx.y+2] = 0;
else if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y+2] = 0;
}
//Setup pixel values
sharedmem[threadIdx.x+1][threadIdx.y+1] = inputImageKernel[row*imageWidth+col];
    //Check for boundary conditions.
if(is_x_left && (col>0))
sharedmem[threadIdx.x][threadIdx.y+1] = inputImageKernel[row*imageWidth+(col-1)];
else if(is_x_right && (col<imageWidth-1))
sharedmem[threadIdx.x + 2][threadIdx.y+1]= inputImageKernel[row*imageWidth+(col+1)];
if (is_y_top && (row>0)){
sharedmem[threadIdx.x+1][threadIdx.y] = inputImageKernel[(row-1)*imageWidth+col];
if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y] = inputImageKernel[(row-1)*imageWidth+(col-1)];
else if(is_x_right )
sharedmem[threadIdx.x+2][threadIdx.y] = inputImageKernel[(row-1)*imageWidth+(col+1)];
}
else if (is_y_bottom && (row<imageHeight-1)){
sharedmem[threadIdx.x+1][threadIdx.y+2] = inputImageKernel[(row+1)*imageWidth + col];
if(is_x_right)
sharedmem[threadIdx.x+2][threadIdx.y+2] = inputImageKernel[(row+1)*imageWidth+(col+1)];
else if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y+2] = inputImageKernel[(row+1)*imageWidth+(col-1)];
}
__syncthreads(); //Wait for all threads to be done.
//Setup the filter.
unsigned char filterVector[9] = {sharedmem[threadIdx.x][threadIdx.y], sharedmem[threadIdx.x+1][threadIdx.y], sharedmem[threadIdx.x+2][threadIdx.y],
sharedmem[threadIdx.x][threadIdx.y+1], sharedmem[threadIdx.x+1][threadIdx.y+1], sharedmem[threadIdx.x+2][threadIdx.y+1],
sharedmem[threadIdx.x] [threadIdx.y+2], sharedmem[threadIdx.x+1][threadIdx.y+2], sharedmem[threadIdx.x+2][threadIdx.y+2]};
{
for (int i = 0; i < 9; i++) {
for (int j = i + 1; j < 9; j++) {
if (filterVector[i] > filterVector[j]) {
//Swap Values.
char tmp = filterVector[i];
filterVector[i] = filterVector[j];
filterVector[j] = tmp;
}
}
}
outputImagekernel[row*imageWidth+col] = filterVector[4]; //Set the output image values.
}
}
| ef3cade48f6bfc32c14355b30aa27174fa4f7585.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <iostream>
#include <time.h>
#define TILE_SIZE 4
#define WINDOW_SIZE (3)
template<class IMG_TYPE>
__global__ void kernelMedian( const IMG_TYPE * __restrict__ in, IMG_TYPE *output, int j_dim, int pitch)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned char filterVector[9] = {0,0,0,0,0,0,0,0,0};
if((row==0) || (col==0) || (row==pitch-1) || (col==j_dim-1))
    output[row*j_dim+col] = 0; //Deal with boundary conditions
else {
for (int x = 0; x < WINDOW_SIZE; x++) {
for (int y = 0; y < WINDOW_SIZE; y++){
        filterVector[x*WINDOW_SIZE+y] = in[(row+x-1)*j_dim+(col+y-1)]; // set up the filtering window.
}
}
for (int i = 0; i < 9; i++) {
for (int j = i + 1; j < 9; j++) {
if (filterVector[i] > filterVector[j]) {
//Swap the variables.
char tmp = filterVector[i];
filterVector[i] = filterVector[j];
filterVector[j] = tmp;
}
}
}
output[row*j_dim+col] = filterVector[4]; //Set the output variables.
}
}
template<class IMG_TYPE>
__global__ void medianFilterSharedKernel(const IMG_TYPE * __restrict__ inputImageKernel, IMG_TYPE *outputImagekernel, int imageWidth, int imageHeight)
{
//Set the row and col value for each thread.
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ unsigned char sharedmem[(TILE_SIZE+2)] [(TILE_SIZE+2)]; //initialize shared memory
    //Flag the threads that sit on the border of the tile.
bool is_x_left = (threadIdx.x == 0), is_x_right = (threadIdx.x == TILE_SIZE-1);
bool is_y_top = (threadIdx.y == 0), is_y_bottom = (threadIdx.y == TILE_SIZE-1);
//Initialize with zero
if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y+1] = 0;
else if(is_x_right)
sharedmem[threadIdx.x + 2][threadIdx.y+1]=0;
if (is_y_top){
sharedmem[threadIdx.x+1][threadIdx.y] = 0;
if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y] = 0;
else if(is_x_right)
sharedmem[threadIdx.x+2][threadIdx.y] = 0;
}
else if (is_y_bottom){
sharedmem[threadIdx.x+1][threadIdx.y+2] = 0;
if(is_x_right)
sharedmem[threadIdx.x+2][threadIdx.y+2] = 0;
else if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y+2] = 0;
}
//Setup pixel values
sharedmem[threadIdx.x+1][threadIdx.y+1] = inputImageKernel[row*imageWidth+col];
    //Check for boundary conditions.
if(is_x_left && (col>0))
sharedmem[threadIdx.x][threadIdx.y+1] = inputImageKernel[row*imageWidth+(col-1)];
else if(is_x_right && (col<imageWidth-1))
sharedmem[threadIdx.x + 2][threadIdx.y+1]= inputImageKernel[row*imageWidth+(col+1)];
if (is_y_top && (row>0)){
sharedmem[threadIdx.x+1][threadIdx.y] = inputImageKernel[(row-1)*imageWidth+col];
if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y] = inputImageKernel[(row-1)*imageWidth+(col-1)];
else if(is_x_right )
sharedmem[threadIdx.x+2][threadIdx.y] = inputImageKernel[(row-1)*imageWidth+(col+1)];
}
else if (is_y_bottom && (row<imageHeight-1)){
sharedmem[threadIdx.x+1][threadIdx.y+2] = inputImageKernel[(row+1)*imageWidth + col];
if(is_x_right)
sharedmem[threadIdx.x+2][threadIdx.y+2] = inputImageKernel[(row+1)*imageWidth+(col+1)];
else if(is_x_left)
sharedmem[threadIdx.x][threadIdx.y+2] = inputImageKernel[(row+1)*imageWidth+(col-1)];
}
__syncthreads(); //Wait for all threads to be done.
//Setup the filter.
unsigned char filterVector[9] = {sharedmem[threadIdx.x][threadIdx.y], sharedmem[threadIdx.x+1][threadIdx.y], sharedmem[threadIdx.x+2][threadIdx.y],
sharedmem[threadIdx.x][threadIdx.y+1], sharedmem[threadIdx.x+1][threadIdx.y+1], sharedmem[threadIdx.x+2][threadIdx.y+1],
sharedmem[threadIdx.x] [threadIdx.y+2], sharedmem[threadIdx.x+1][threadIdx.y+2], sharedmem[threadIdx.x+2][threadIdx.y+2]};
{
for (int i = 0; i < 9; i++) {
for (int j = i + 1; j < 9; j++) {
if (filterVector[i] > filterVector[j]) {
//Swap Values.
char tmp = filterVector[i];
filterVector[i] = filterVector[j];
filterVector[j] = tmp;
}
}
}
outputImagekernel[row*imageWidth+col] = filterVector[4]; //Set the output image values.
}
}
|
9a2d28de7ef0ab008cf7b81d07ca0be27e591b62.hip | // !!! This is a file automatically generated by hipify!!!
#include <cusp/csr_matrix.h>
#include <cusp/monitor.h>
#include <cusp/krylov/gmres.h>
#include <cusp/io/matrix_market.h>
#include <cusp/precond/diagonal.h>
#include <fstream>
#include <iostream>
#include <sys/time.h>
#include <time.h>   // clock_gettime, CLOCK_MONOTONIC, struct timespec
#include <cstring>  // strcmp
void load_vector(const char* flname, cusp::array1d<float, cusp::host_memory> &v)
{
std::ifstream input_data;
input_data.open(flname);
int n_elements;
input_data >> n_elements;
v.resize(n_elements);
for(int i = 0; i < n_elements; i++)
{
float entry;
input_data >> entry;
v[i] = entry;
}
input_data.close();
}
void write_vector(const char* flname, cusp::array1d<float, cusp::host_memory> &v)
{
std::ofstream output_data;
output_data.open(flname, std::ofstream::out | std::ofstream::trunc);
output_data << v.size() << std::endl;
for(int i = 0; i < v.size(); i++)
{
output_data << v[i] << std::endl;
}
output_data.close();
}
long int elapsed_time_ms(struct timespec &start, struct timespec &end)
{
return (end.tv_sec * 1000 + end.tv_nsec / (1000 * 1000)) -
(start.tv_sec * 1000 + start.tv_nsec / (1000 * 1000));
}
int main(int argc, char** argv)
{
const char* PRECOND_NONE = "none";
const char* PRECOND_DIAG = "diag";
if(argc != 5)
{
std::cout << "Usage: " << argv[0] << " <preconditioner> <matrix_flname> <input_vector_flname> <output_vector_flname>" << std::endl;
std::cout << std::endl;
std::cout << "Preconditioner can be one of: " << PRECOND_NONE << " " << PRECOND_DIAG << std::endl;
return 1;
}
if(strcmp(argv[1], PRECOND_NONE) != 0 and strcmp(argv[1], PRECOND_DIAG) != 0)
{
std::cout << "Preconditioner must be one of: " << PRECOND_NONE << " " << PRECOND_DIAG << std::endl;
return 1;
}
else
{
std::cout << "Using preconditioner: " << argv[1] << std::endl;
}
// create an empty sparse matrix structure (CSR format)
cusp::csr_matrix<int, float, cusp::host_memory> A_host;
// read matrix
cusp::io::read_matrix_market_file(A_host, argv[2]);
// create empty array
cusp::array1d<float, cusp::host_memory> b_host(A_host.num_cols, 0);
// read vector
load_vector(argv[3], b_host);
std::cout << "Matrix dimensions: " << A_host.num_rows << " " << A_host.num_cols << std::endl;
std::cout << "Vector length : " << b_host.size() << std::endl;
struct timespec copy_start;
struct timespec copy_end;
struct timespec exec_start;
struct timespec exec_end;
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, ©_start);
cusp::csr_matrix<int, float, cusp::device_memory> A(A_host);
cusp::array1d<float, cusp::device_memory> b(b_host);
cusp::array1d<float, cusp::device_memory> x(A.num_rows, 0);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, ©_end);
// set stopping criteria:
// iteration_limit = 5000
// relative_tolerance = 1e-6
// absolute_tolerance = 1e-6
    // verbose = false
cusp::monitor<float> monitor(b, 5000, 1e-6, 1e-6, false);
int restart = 50;
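    // GMRES is restarted every 50 Krylov vectors, which bounds the memory held
    // for the orthogonal basis at the cost of potentially slower convergence.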
// solve the linear system A x = b
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &exec_start);
// set preconditioner
if (strcmp(argv[1], PRECOND_DIAG) == 0)
{
cusp::precond::diagonal<float, cusp::device_memory> M(A);
cusp::krylov::gmres(A, x, b, restart, monitor, M);
}
else if(strcmp(argv[1], PRECOND_NONE) == 0)
{
cusp::identity_operator<float, cusp::device_memory> M(A.num_rows, A.num_cols);
cusp::krylov::gmres(A, x, b, restart, monitor, M);
}
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &exec_end);
// copy results back and write out
cusp::array1d<float, cusp::host_memory> x_host(x);
write_vector(argv[4], x_host);
if(monitor.converged())
{
std::cout << "Solver converged to " << monitor.relative_tolerance() << " relative tolerance";
std::cout << " with residual norm " << monitor.residual_norm();
std::cout << " after " << monitor.iteration_count() << " iterations" << std::endl;
} else {
std::cout << "Solver reached iteration limit " << monitor.iteration_limit() << " before converging";
std::cout << " to " << monitor.relative_tolerance() << " relative tolerance " << std::endl;
}
long int copy_time = elapsed_time_ms(copy_start, copy_end);
long int execution_time = elapsed_time_ms(exec_start, exec_end);
std::cout << "Copy time (ms): " << copy_time << std::endl;
std::cout << "Execution time (ms): " << execution_time << std::endl;
return 0;
}
| 9a2d28de7ef0ab008cf7b81d07ca0be27e591b62.cu | #include <cusp/csr_matrix.h>
#include <cusp/monitor.h>
#include <cusp/krylov/gmres.h>
#include <cusp/io/matrix_market.h>
#include <cusp/precond/diagonal.h>
#include <fstream>
#include <iostream>
#include <sys/time.h>
#include <time.h>   // clock_gettime, CLOCK_MONOTONIC, struct timespec
#include <cstring>  // strcmp
void load_vector(const char* flname, cusp::array1d<float, cusp::host_memory> &v)
{
std::ifstream input_data;
input_data.open(flname);
int n_elements;
input_data >> n_elements;
v.resize(n_elements);
for(int i = 0; i < n_elements; i++)
{
float entry;
input_data >> entry;
v[i] = entry;
}
input_data.close();
}
void write_vector(const char* flname, cusp::array1d<float, cusp::host_memory> &v)
{
std::ofstream output_data;
output_data.open(flname, std::ofstream::out | std::ofstream::trunc);
output_data << v.size() << std::endl;
for(int i = 0; i < v.size(); i++)
{
output_data << v[i] << std::endl;
}
output_data.close();
}
long int elapsed_time_ms(struct timespec &start, struct timespec &end)
{
return (end.tv_sec * 1000 + end.tv_nsec / (1000 * 1000)) -
(start.tv_sec * 1000 + start.tv_nsec / (1000 * 1000));
}
int main(int argc, char** argv)
{
const char* PRECOND_NONE = "none";
const char* PRECOND_DIAG = "diag";
if(argc != 5)
{
std::cout << "Usage: " << argv[0] << " <preconditioner> <matrix_flname> <input_vector_flname> <output_vector_flname>" << std::endl;
std::cout << std::endl;
std::cout << "Preconditioner can be one of: " << PRECOND_NONE << " " << PRECOND_DIAG << std::endl;
return 1;
}
if(strcmp(argv[1], PRECOND_NONE) != 0 and strcmp(argv[1], PRECOND_DIAG) != 0)
{
std::cout << "Preconditioner must be one of: " << PRECOND_NONE << " " << PRECOND_DIAG << std::endl;
return 1;
}
else
{
std::cout << "Using preconditioner: " << argv[1] << std::endl;
}
// create an empty sparse matrix structure (CSR format)
cusp::csr_matrix<int, float, cusp::host_memory> A_host;
// read matrix
cusp::io::read_matrix_market_file(A_host, argv[2]);
// create empty array
cusp::array1d<float, cusp::host_memory> b_host(A_host.num_cols, 0);
// read vector
load_vector(argv[3], b_host);
std::cout << "Matrix dimensions: " << A_host.num_rows << " " << A_host.num_cols << std::endl;
std::cout << "Vector length : " << b_host.size() << std::endl;
struct timespec copy_start;
struct timespec copy_end;
struct timespec exec_start;
struct timespec exec_end;
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, ©_start);
cusp::csr_matrix<int, float, cusp::device_memory> A(A_host);
cusp::array1d<float, cusp::device_memory> b(b_host);
cusp::array1d<float, cusp::device_memory> x(A.num_rows, 0);
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, ©_end);
// set stopping criteria:
// iteration_limit = 5000
// relative_tolerance = 1e-6
// absolute_tolerance = 1e-6
    // verbose = false
cusp::monitor<float> monitor(b, 5000, 1e-6, 1e-6, false);
int restart = 50;
// solve the linear system A x = b
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &exec_start);
// set preconditioner
if (strcmp(argv[1], PRECOND_DIAG) == 0)
{
cusp::precond::diagonal<float, cusp::device_memory> M(A);
cusp::krylov::gmres(A, x, b, restart, monitor, M);
}
else if(strcmp(argv[1], PRECOND_NONE) == 0)
{
cusp::identity_operator<float, cusp::device_memory> M(A.num_rows, A.num_cols);
cusp::krylov::gmres(A, x, b, restart, monitor, M);
}
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &exec_end);
// copy results back and write out
cusp::array1d<float, cusp::host_memory> x_host(x);
write_vector(argv[4], x_host);
if(monitor.converged())
{
std::cout << "Solver converged to " << monitor.relative_tolerance() << " relative tolerance";
std::cout << " with residual norm " << monitor.residual_norm();
std::cout << " after " << monitor.iteration_count() << " iterations" << std::endl;
} else {
std::cout << "Solver reached iteration limit " << monitor.iteration_limit() << " before converging";
std::cout << " to " << monitor.relative_tolerance() << " relative tolerance " << std::endl;
}
long int copy_time = elapsed_time_ms(copy_start, copy_end);
long int execution_time = elapsed_time_ms(exec_start, exec_end);
std::cout << "Copy time (ms): " << copy_time << std::endl;
std::cout << "Execution time (ms): " << execution_time << std::endl;
return 0;
}
|
fpgeneric.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// Make generic operators for floating point types
/* This file contains:
Generalized library calls
kernels to be called for not supported data type
*/
// NV_TODO: optimize speed -- pass things needed in, optimize kernel speed, add half2
// NV_TODO: investigate cub support for half
#include "core/providers/cuda/cu_inc/common.cuh"
#include <hiprand/hiprand_kernel.h>
#define TRANS_TILE_DIM 32
#define BLOCK_ROWS 8
#define COPY_TILE_DIM 1024
#define COPY_BLOCK_DIM 256
// kernel(s) for half functions with no library support
namespace {
// TODO - refactor the function with similar logic in Transpose3DKernel using 16x16 Tile
__global__ void transposeNoOverlap(half* odata, const half* idata, const int m, const int n) {
__shared__ half tile[TRANS_TILE_DIM][TRANS_TILE_DIM + 1];
int x = blockIdx.x * TRANS_TILE_DIM + threadIdx.x;
int y = blockIdx.y * TRANS_TILE_DIM + threadIdx.y;
if (x < m) {
for (int j = 0; j < TRANS_TILE_DIM; j += BLOCK_ROWS) {
if (j >= (n - y)) continue;
tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * m + x];
}
}
__syncthreads();
x = blockIdx.y * TRANS_TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TRANS_TILE_DIM + threadIdx.y;
if (x >= n) return;
for (int j = 0; j < TRANS_TILE_DIM; j += BLOCK_ROWS) {
if ((y + j) >= m) return;
odata[(y + j) * n + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
__global__ void CopyVectorHalf(const half* x, int incx, half* y, int incy, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n) return;
y[id * incy] = x[id * incx];
}
} // namespace
hipblasStatus_t cublasTransposeHelper(hipblasHandle_t, hipblasOperation_t, hipblasOperation_t, int m, int n, const half*, const half* A, int, const half*, const half*, int, half* C, int) {
if (C != A) {
dim3 dimGrid((n + TRANS_TILE_DIM - 1) / TRANS_TILE_DIM, (m + TRANS_TILE_DIM - 1) / TRANS_TILE_DIM, 1);
dim3 dimBlock(TRANS_TILE_DIM, BLOCK_ROWS, 1);
hipLaunchKernelGGL(( transposeNoOverlap), dim3(dimGrid), dim3(dimBlock), 0, 0, C, A, n, m);
} else {
return HIPBLAS_STATUS_NOT_SUPPORTED;
}
return HIPBLAS_STATUS_SUCCESS;
}
hipblasStatus_t cublasCopyHelper(hipblasHandle_t, int n, const half* x, int incx, half* y, int incy) {
dim3 dimGrid((unsigned int)(n + COPY_BLOCK_DIM - 1) / COPY_BLOCK_DIM, 1, 1);
dim3 dimBlock(COPY_BLOCK_DIM, 1, 1);
hipLaunchKernelGGL(( CopyVectorHalf), dim3(dimGrid), dim3(dimBlock), 0, 0, x, incx, y, incy, n);
return HIPBLAS_STATUS_SUCCESS;
} | fpgeneric.cu | //
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// Make generic operators for floating point types
/* This file contains:
Generalized library calls
kernels to be called for not supported data type
*/
// NV_TODO: optimize speed -- pass things needed in, optimize kernel speed, add half2
// NV_TODO: investigate cub support for half
#include "core/providers/cuda/cu_inc/common.cuh"
#include <curand_kernel.h>
#define TRANS_TILE_DIM 32
#define BLOCK_ROWS 8
#define COPY_TILE_DIM 1024
#define COPY_BLOCK_DIM 256
// kernel(s) for half functions with no library support
namespace {
// TODO - refactor the function with similar logic in Transpose3DKernel using 16x16 Tile
__global__ void transposeNoOverlap(half* odata, const half* idata, const int m, const int n) {
__shared__ half tile[TRANS_TILE_DIM][TRANS_TILE_DIM + 1];
int x = blockIdx.x * TRANS_TILE_DIM + threadIdx.x;
int y = blockIdx.y * TRANS_TILE_DIM + threadIdx.y;
if (x < m) {
for (int j = 0; j < TRANS_TILE_DIM; j += BLOCK_ROWS) {
if (j >= (n - y)) continue;
tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * m + x];
}
}
__syncthreads();
x = blockIdx.y * TRANS_TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TRANS_TILE_DIM + threadIdx.y;
if (x >= n) return;
for (int j = 0; j < TRANS_TILE_DIM; j += BLOCK_ROWS) {
if ((y + j) >= m) return;
odata[(y + j) * n + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
__global__ void CopyVectorHalf(const half* x, int incx, half* y, int incy, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n) return;
y[id * incy] = x[id * incx];
}
} // namespace
cublasStatus_t cublasTransposeHelper(cublasHandle_t, cublasOperation_t, cublasOperation_t, int m, int n, const half*, const half* A, int, const half*, const half*, int, half* C, int) {
if (C != A) {
dim3 dimGrid((n + TRANS_TILE_DIM - 1) / TRANS_TILE_DIM, (m + TRANS_TILE_DIM - 1) / TRANS_TILE_DIM, 1);
dim3 dimBlock(TRANS_TILE_DIM, BLOCK_ROWS, 1);
transposeNoOverlap<<<dimGrid, dimBlock>>>(C, A, n, m);
} else {
return CUBLAS_STATUS_NOT_SUPPORTED;
}
return CUBLAS_STATUS_SUCCESS;
}
cublasStatus_t cublasCopyHelper(cublasHandle_t, int n, const half* x, int incx, half* y, int incy) {
dim3 dimGrid((unsigned int)(n + COPY_BLOCK_DIM - 1) / COPY_BLOCK_DIM, 1, 1);
dim3 dimBlock(COPY_BLOCK_DIM, 1, 1);
CopyVectorHalf<<<dimGrid, dimBlock>>>(x, incx, y, incy, n);
return CUBLAS_STATUS_SUCCESS;
} |
7528897301074c4d70e1408a673343570b388613.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
__global__ void vecAdd(int * in, int * out, int width) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < width)
out[index] += in[index];
}
int main(void) {
int ii;
int * in, * out;
int * d_in, * d_out;
int width = 1<<10;
wbTime_start(Generic, "Creating memory on host");
in = (int *) malloc(width * sizeof(int));
out = (int *) malloc(width * sizeof(int));
wbTime_stop(Generic, "Creating memory on host");
wbLog(TRACE, "HELLO Logger");
wbTime_start(IO, "Initializing host values");
for (ii = 0; ii < width; ii++) {
in[ii] = ii;
out[ii] = ii;
}
wbTime_stop(IO, "Initializing host values");
wbTime_start(GPU, "Doing GPU allocation + computation");
hipMalloc((void **) &d_in, width*sizeof(int));
hipMalloc((void **) &d_out, width*sizeof(int));
wbTime_start(Copy, "Copying memory to the device");
hipMemcpy(d_in, in, width * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_out, out, width * sizeof(int), hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying memory to the device");
dim3 blockDim(32);
dim3 gridDim(width/32);
wbTime_start(Compute, "Performing CUDA computation");
hipLaunchKernelGGL(( vecAdd), dim3(gridDim), dim3(blockDim), 0, 0, d_in, d_out, width);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying memory back from the device");
hipMemcpy(out, d_out, width * sizeof(int), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying memory back from the device");
wbTime_stop(GPU, "Doing GPU allocation + computation");
return 0;
}
| 7528897301074c4d70e1408a673343570b388613.cu |
#include <wb.h>
__global__ void vecAdd(int * in, int * out, int width) {
int index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < width)
out[index] += in[index];
}
int main(void) {
int ii;
int * in, * out;
int * d_in, * d_out;
int width = 1<<10;
wbTime_start(Generic, "Creating memory on host");
in = (int *) malloc(width * sizeof(int));
out = (int *) malloc(width * sizeof(int));
wbTime_stop(Generic, "Creating memory on host");
wbLog(TRACE, "HELLO Logger");
wbTime_start(IO, "Initializing host values");
for (ii = 0; ii < width; ii++) {
in[ii] = ii;
out[ii] = ii;
}
wbTime_stop(IO, "Initializing host values");
wbTime_start(GPU, "Doing GPU allocation + computation");
cudaMalloc((void **) &d_in, width*sizeof(int));
cudaMalloc((void **) &d_out, width*sizeof(int));
wbTime_start(Copy, "Copying memory to the device");
cudaMemcpy(d_in, in, width * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_out, out, width * sizeof(int), cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying memory to the device");
dim3 blockDim(32);
dim3 gridDim(width/32);
wbTime_start(Compute, "Performing CUDA computation");
vecAdd<<<gridDim, blockDim>>>(d_in, d_out, width);
cudaThreadSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying memory back from the device");
cudaMemcpy(out, d_out, width * sizeof(int), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying memory back from the device");
wbTime_stop(GPU, "Doing GPU allocation + computation");
return 0;
}
|
3d5a0b4ee24deb061eb90723bb0af885760e37a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* File: cuda_kmeans.cu (CUDA version) */
/* Description: Implementation of simple k-means clustering algorithm */
/* This program takes an array of N data objects, each with */
/* M coordinates and performs a k-means clustering given a */
/* user-provided value of the number of clusters (K). The */
/* clustering results are saved in 2 arrays: */
/* 1. a returned array of size [K][N] indicating the center */
/* coordinates of K clusters */
/* 2. membership[N] stores the cluster center ids, each */
/* corresponding to the cluster a data object is assigned */
/* */
/* Author: Wei-keng Liao */
/* ECE Department, Northwestern University */
/* email: [email protected] */
/* Copyright, 2005, Wei-keng Liao */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Copyright (c) 2005 Wei-keng Liao
// Copyright (c) 2011 Serban Giuroiu
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include "kmeans.h"
/*
static inline int nextPowerOfTwo(int n) {
n--;
n = n >> 1 | n;
n = n >> 2 | n;
n = n >> 4 | n;
n = n >> 8 | n;
n = n >> 16 | n;
// n = n >> 32 | n; // For 64-bit ints
return ++n;
}*/
/*----< euclid_dist_2() >----------------------------------------------------*/
/* square of Euclid distance between two multi-dimensional points */
__host__ __device__ inline static
float euclid_dist_2(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *clusters, // [numCoords][numClusters]
int objectId,
int clusterId)
{
int i;
float ans=0.0;
for (i = 0; i < numCoords; i++) {
ans += (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]) *
(objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]);
}
return(ans);
}
/*----< find_nearest_cluster() >---------------------------------------------*/
__global__
void find_nearest_cluster(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *deviceClusters, // [numCoords][numClusters]
int *membership, // [numObjs]
int *intermediates)
{
extern __shared__ char sharedMemory[];
// The type chosen for membershipChanged must be large enough to support
// reductions! There are blockDim.x elements, one for each thread in the
// block.
unsigned short *membershipChanged = (unsigned short *)sharedMemory;
float *clusters = (float *)(sharedMemory + blockDim.x * sizeof(unsigned short));
membershipChanged[threadIdx.x] = 0;
// BEWARE: We can overrun our shared memory here if there are too many
// clusters or too many coordinates!
for (int i = threadIdx.x; i < numClusters; i += blockDim.x) {
for (int j = 0; j < numCoords; j++) {
clusters[numClusters * j + i] = deviceClusters[numClusters * j + i];
}
}
__syncthreads();
int objectId = blockDim.x * blockIdx.x + threadIdx.x;
if (objectId < numObjs) {
int index, i;
float dist, min_dist;
/* find the cluster id that has min distance to object */
index = 0;
min_dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, 0);
for (i=1; i<numClusters; i++) {
dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, i);
/* no need square root */
if (dist < min_dist) { /* find the min and its array index */
min_dist = dist;
index = i;
}
}
if (membership[objectId] != index) {
membershipChanged[threadIdx.x] = 1;
}
/* assign the membership to object objectId */
membership[objectId] = index;
__syncthreads(); // For membershipChanged[]
// blockDim.x *must* be a power of two!
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
membershipChanged[threadIdx.x] +=
membershipChanged[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
intermediates[blockIdx.x] = membershipChanged[0];
}
}
}
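// Shared-memory sizing note (illustrative sketch, not part of the original file; the buffer
// and launch-size names below are placeholders): the dynamic shared memory passed at launch
// must cover both arrays used above, i.e.
//   size_t smem = threadsPerBlock * sizeof(unsigned short)          // membershipChanged[]
//               + (size_t)numClusters * numCoords * sizeof(float);  // block-local clusters[] copy
// e.g. hipLaunchKernelGGL(find_nearest_cluster, dim3(numBlocks), dim3(threadsPerBlock), smem, 0,
//                         numCoords, numObjs, numClusters,
//                         deviceObjects, deviceClusters, deviceMembership, deviceIntermediates);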
// blockDim must be PowerOfTwo
__global__
void compute_delta(int *srcIntermediates, // The source to fetch the intermediates
int *dstIntermediates, // The destination to store the sum
int numIntermediates) // The actual number of intermediates
{
// The number of elements in this array should be equal to
// numIntermediates2, the number of threads launched. It *must* be a power
// of two!
extern __shared__ unsigned int intermediates[];
int tId = blockDim.x * blockIdx.x + threadIdx.x;
// Copy global intermediate values into shared memory.
intermediates[threadIdx.x] =
(tId < numIntermediates) ? srcIntermediates[tId] : 0;
__syncthreads();
// blockDim.x *must* be a power of two!
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
intermediates[threadIdx.x] += intermediates[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
dstIntermediates[blockIdx.x] = intermediates[0];
}
}
// blockDim must be PowerOfTwo
__global__
void update_centroids_clusterSize(float *objects, // [numCoords][numObjs]
int *membership, // [numObjs]
float *centroids, // [numBlks][numCoords][numClusters]
int *clusterSize, // [numBlks][numClusters]
int numCoords, int numObjs, int numClusters)
{
int tId = blockDim.x * blockIdx.x + threadIdx.x;
if(tId < numObjs){
int index = membership[tId];
// update cluster size
atomicAdd(&clusterSize[blockIdx.x*numClusters + index],1);
for(int j = 0; j < numCoords; j++){
int data_index = blockIdx.x*numCoords*numClusters+j*numClusters+index;
int object_index = j*numObjs+tId;
atomicAdd(&centroids[data_index], objects[object_index]);
}
}
}
/*
To be launched in 1 block, block size of numClusters
*/
__global__
void reduce_clusterSize(int *srcClusterSize, // [numSrcBlks][numClusters]
int *dstClusterSize, // [numClusters]
int numSrcBlks, int numClusters){
int tx = threadIdx.x;
for(int i=0; i < numSrcBlks; i++){
dstClusterSize[tx] += srcClusterSize[i*numClusters+tx];
}
}
/*
To be launched in numCoords block, block size of numClusters
*/
__global__
void reduce_centroids(float *srcCentroids, // [numSrcBlks][numCoords][numClusters]
float *dstCentroids, // [numCoords][numClusters]
int *clusterSize, // [numClusters]
int numSrcBlks, int numClusters, int numCoords)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
for(int i = 0; i < numSrcBlks; i++){
dstCentroids[bx*numClusters+tx] += srcCentroids[i*numCoords*numClusters+bx*numClusters+tx];
}
dstCentroids[bx*numClusters+tx] /= clusterSize[tx];
}
| 3d5a0b4ee24deb061eb90723bb0af885760e37a4.cu | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* File: cuda_kmeans.cu (CUDA version) */
/* Description: Implementation of simple k-means clustering algorithm */
/* This program takes an array of N data objects, each with */
/* M coordinates and performs a k-means clustering given a */
/* user-provided value of the number of clusters (K). The */
/* clustering results are saved in 2 arrays: */
/* 1. a returned array of size [K][N] indicating the center */
/* coordinates of K clusters */
/* 2. membership[N] stores the cluster center ids, each */
/* corresponding to the cluster a data object is assigned */
/* */
/* Author: Wei-keng Liao */
/* ECE Department, Northwestern University */
/* email: [email protected] */
/* Copyright, 2005, Wei-keng Liao */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Copyright (c) 2005 Wei-keng Liao
// Copyright (c) 2011 Serban Giuroiu
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include "kmeans.h"
/*
static inline int nextPowerOfTwo(int n) {
n--;
n = n >> 1 | n;
n = n >> 2 | n;
n = n >> 4 | n;
n = n >> 8 | n;
n = n >> 16 | n;
// n = n >> 32 | n; // For 64-bit ints
return ++n;
}*/
/*----< euclid_dist_2() >----------------------------------------------------*/
/* square of Euclid distance between two multi-dimensional points */
__host__ __device__ inline static
float euclid_dist_2(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *clusters, // [numCoords][numClusters]
int objectId,
int clusterId)
{
int i;
float ans=0.0;
for (i = 0; i < numCoords; i++) {
ans += (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]) *
(objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]);
}
return(ans);
}
/*----< find_nearest_cluster() >---------------------------------------------*/
__global__
void find_nearest_cluster(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *deviceClusters, // [numCoords][numClusters]
int *membership, // [numObjs]
int *intermediates)
{
extern __shared__ char sharedMemory[];
// The type chosen for membershipChanged must be large enough to support
// reductions! There are blockDim.x elements, one for each thread in the
// block.
unsigned short *membershipChanged = (unsigned short *)sharedMemory;
float *clusters = (float *)(sharedMemory + blockDim.x * sizeof(unsigned short));
membershipChanged[threadIdx.x] = 0;
// BEWARE: We can overrun our shared memory here if there are too many
// clusters or too many coordinates!
for (int i = threadIdx.x; i < numClusters; i += blockDim.x) {
for (int j = 0; j < numCoords; j++) {
clusters[numClusters * j + i] = deviceClusters[numClusters * j + i];
}
}
__syncthreads();
int objectId = blockDim.x * blockIdx.x + threadIdx.x;
if (objectId < numObjs) {
int index, i;
float dist, min_dist;
/* find the cluster id that has min distance to object */
index = 0;
min_dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, 0);
for (i=1; i<numClusters; i++) {
dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, i);
/* no need square root */
if (dist < min_dist) { /* find the min and its array index */
min_dist = dist;
index = i;
}
}
if (membership[objectId] != index) {
membershipChanged[threadIdx.x] = 1;
}
/* assign the membership to object objectId */
membership[objectId] = index;
__syncthreads(); // For membershipChanged[]
// blockDim.x *must* be a power of two!
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
membershipChanged[threadIdx.x] +=
membershipChanged[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
intermediates[blockIdx.x] = membershipChanged[0];
}
}
}
// blockDim must be PowerOfTwo
__global__
void compute_delta(int *srcIntermediates, // The source to fetch the intermediates
int *dstIntermediates, // The destination to store the sum
int numIntermediates) // The actual number of intermediates
{
// The number of elements in this array should be equal to
// numIntermediates2, the number of threads launched. It *must* be a power
// of two!
extern __shared__ unsigned int intermediates[];
int tId = blockDim.x * blockIdx.x + threadIdx.x;
// Copy global intermediate values into shared memory.
intermediates[threadIdx.x] =
(tId < numIntermediates) ? srcIntermediates[tId] : 0;
__syncthreads();
// blockDim.x *must* be a power of two!
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
intermediates[threadIdx.x] += intermediates[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
dstIntermediates[blockIdx.x] = intermediates[0];
}
}
// blockDim must be PowerOfTwo
__global__
void update_centroids_clusterSize(float *objects, // [numCoords][numObjs]
int *membership, // [numObjs]
float *centroids, // [numBlks][numCoords][numClusters]
int *clusterSize, // [numBlks][numClusters]
int numCoords, int numObjs, int numClusters)
{
int tId = blockDim.x * blockIdx.x + threadIdx.x;
if(tId < numObjs){
int index = membership[tId];
// update cluster size
atomicAdd(&clusterSize[blockIdx.x*numClusters + index],1);
for(int j = 0; j < numCoords; j++){
int data_index = blockIdx.x*numCoords*numClusters+j*numClusters+index;
int object_index = j*numObjs+tId;
atomicAdd(&centroids[data_index], objects[object_index]);
}
}
}
/*
To be launched in 1 block, block size of numClusters
*/
__global__
void reduce_clusterSize(int *srcClusterSize, // [numSrcBlks][numClusters]
int *dstClusterSize, // [numClusters]
int numSrcBlks, int numClusters){
int tx = threadIdx.x;
for(int i=0; i < numSrcBlks; i++){
dstClusterSize[tx] += srcClusterSize[i*numClusters+tx];
}
}
/*
To be launched in numCoords block, block size of numClusters
*/
__global__
void reduce_centroids(float *srcCentroids, // [numSrcBlks][numCoords][numClusters]
float *dstCentroids, // [numCoords][numClusters]
int *clusterSize, // [numClusters]
int numSrcBlks, int numClusters, int numCoords)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
for(int i = 0; i < numSrcBlks; i++){
dstCentroids[bx*numClusters+tx] += srcCentroids[i*numCoords*numClusters+bx*numClusters+tx];
}
dstCentroids[bx*numClusters+tx] /= clusterSize[tx];
}
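/* Illustrative driver sketch (an assumption, not part of the original source): one way a
 * host loop could chain find_nearest_cluster and compute_delta for a single iteration.
 * The buffer names are placeholders, threadsPerBlock must be a power of two, and the
 * rounded-up reduction width is assumed to stay within one block (<= 1024 threads). */
static int sketch_assign_memberships(int numCoords, int numObjs, int numClusters,
                                     float *deviceObjects, float *deviceClusters,
                                     int *deviceMembership, int *deviceIntermediates,
                                     int *deviceDelta)
{
    const unsigned int threadsPerBlock = 128; /* power of two, required by the in-block reduction */
    const unsigned int numBlocks = (numObjs + threadsPerBlock - 1) / threadsPerBlock;
    /* shared memory: one flag per thread plus a block-local copy of the cluster centers */
    const size_t smem = threadsPerBlock * sizeof(unsigned short)
                      + (size_t)numClusters * numCoords * sizeof(float);
    find_nearest_cluster<<<numBlocks, threadsPerBlock, smem>>>(
        numCoords, numObjs, numClusters,
        deviceObjects, deviceClusters, deviceMembership, deviceIntermediates);
    /* reduce the per-block "membership changed" counts into deviceDelta[0] */
    unsigned int reductionWidth = 1;
    while (reductionWidth < numBlocks) reductionWidth <<= 1;
    compute_delta<<<1, reductionWidth, reductionWidth * sizeof(unsigned int)>>>(
        deviceIntermediates, deviceDelta, numBlocks);
    return (cudaDeviceSynchronize() == cudaSuccess) ? 0 : -1;
}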
|
aa4eef1266a757ef5ae1afdaf703708ba771b43f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MyCuda.h"
/**
* Computes the sum of an array on the GPU using the Interleaved Addressing scheme.
* The core idea of Interleaved Addressing is interleaved (strided) pairing of elements, i.e. the classic tree-shaped reduction.
* Problems: 1. every warp contains divergent branches, which is inefficient; 2. the modulo operation is slow.
* globalInputData  input data, located in global memory
* globalOutputData output data, located in global memory
*/
__global__ void reduce0(float *globalInputData, float *globalOutputData, unsigned int n)
{
__shared__ float sdata[BLOCK_SIZE];
// thread index within the block and global element index
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
// load the data into shared memory
sdata[tid] = (index < n) ? globalInputData[index] : 0; // out-of-range entries are set to 0
__syncthreads(); // synchronize: wait until all data of this block has been copied into shared memory
// perform the per-block reduction in shared memory
for (unsigned int s = 1; s < blockDim.x; s *= 2)
{
// the modulo operation is very slow
if ((tid % (2 * s)) == 0)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // wait for this iteration to complete
}
// write the result from shared memory back to global memory
if (tid == 0)
{
globalOutputData[blockIdx.x] = sdata[0];
}
}
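// Illustrative variant (a sketch, not in the original source): the header above notes that
// reduce0 suffers from warp divergence and a slow modulo. A common fix keeps the same launch
// configuration but lets contiguous threads do the work via a strided shared-memory index.
// BLOCK_SIZE is assumed to come from MyCuda.h, exactly as reduce0 already assumes.
__global__ void reduce0_strided_sketch(float *globalInputData, float *globalOutputData, unsigned int n)
{
	__shared__ float sdata[BLOCK_SIZE];
	unsigned int tid = threadIdx.x;
	unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
	sdata[tid] = (index < n) ? globalInputData[index] : 0;
	__syncthreads();
	for (unsigned int s = 1; s < blockDim.x; s *= 2)
	{
		unsigned int i = 2 * s * tid; // contiguous threads stay active, no modulo
		if (i < blockDim.x)
		{
			sdata[i] += sdata[i + s];
		}
		__syncthreads();
	}
	if (tid == 0)
	{
		globalOutputData[blockIdx.x] = sdata[0];
	}
}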
/**
* Measures the running time of the reduce0 kernel.
* fMatrix_Host  pointer to the matrix data on the host
* iRow          number of rows of the matrix
* iCol          number of columns of the matrix
* @return       the sum of all elements
*/
float RuntimeOfReduce0(float *fMatrix_Host, const int iRow, const int iCol)
{
// check that the matrix dimensions are valid
if (iRow <= 0 || iCol <= 0)
{
std::cout << "The size of the matrix is error!" << std::endl;
return 0.0;
}
float *fReuslt = (float*)malloc(sizeof(float));
float *fMatrix_Device; // pointer to device memory
int iMatrixSize = iRow * iCol; // number of matrix elements
HANDLE_ERROR(hipMalloc((void**)&fMatrix_Device, iMatrixSize * sizeof(float))); // allocate device memory for the matrix
HANDLE_ERROR(hipMemcpy(fMatrix_Device, fMatrix_Host, iMatrixSize * sizeof(float), hipMemcpyHostToDevice)); // copy the data to device memory
// record the start time
hipEvent_t start_GPU, end_GPU;
float elaspsedTime;
hipEventCreate(&start_GPU);
hipEventCreate(&end_GPU);
hipEventRecord(start_GPU, 0);
for (int i = 1, iNum = iMatrixSize; i < iMatrixSize; i = i*BLOCK_SIZE)
{
int iBlockNum = (iNum + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipLaunchKernelGGL(( reduce0), dim3(iBlockNum), dim3(BLOCK_SIZE), 0, 0, fMatrix_Device, fMatrix_Device, iNum);
iNum = iBlockNum;
}
HANDLE_ERROR(hipMemcpy(fReuslt, fMatrix_Device, sizeof(float), hipMemcpyDeviceToHost)); // copy the result back to host memory
// stop timing
hipEventRecord(end_GPU, 0);
hipEventSynchronize(end_GPU);
hipEventElapsedTime(&elaspsedTime, start_GPU, end_GPU);
hipEventDestroy(start_GPU);
hipEventDestroy(end_GPU);
std::cout << "Reduce0 " << elaspsedTime << "ms." << std::endl;
HANDLE_ERROR(hipFree(fMatrix_Device));//
return fReuslt[0];
} | aa4eef1266a757ef5ae1afdaf703708ba771b43f.cu | #include "MyCuda.h"
/**
* Computes the sum of an array on the GPU using the Interleaved Addressing scheme.
* The core idea of Interleaved Addressing is interleaved (strided) pairing of elements, i.e. the classic tree-shaped reduction.
* Problems: 1. every warp contains divergent branches, which is inefficient; 2. the modulo operation is slow.
* globalInputData  input data, located in global memory
* globalOutputData output data, located in global memory
*/
__global__ void reduce0(float *globalInputData, float *globalOutputData, unsigned int n)
{
__shared__ float sdata[BLOCK_SIZE];
// thread index within the block and global element index
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
// load the data into shared memory
sdata[tid] = (index < n) ? globalInputData[index] : 0; // out-of-range entries are set to 0
__syncthreads(); // synchronize: wait until all data of this block has been copied into shared memory
// perform the per-block reduction in shared memory
for (unsigned int s = 1; s < blockDim.x; s *= 2)
{
// the modulo operation is very slow
if ((tid % (2 * s)) == 0)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // wait for this iteration to complete
}
// write the result from shared memory back to global memory
if (tid == 0)
{
globalOutputData[blockIdx.x] = sdata[0];
}
}
/**
* Measures the running time of the reduce0 kernel.
* fMatrix_Host  pointer to the matrix data on the host
* iRow          number of rows of the matrix
* iCol          number of columns of the matrix
* @return       the sum of all elements
*/
float RuntimeOfReduce0(float *fMatrix_Host, const int iRow, const int iCol)
{
// check that the matrix dimensions are valid
if (iRow <= 0 || iCol <= 0)
{
std::cout << "The size of the matrix is error!" << std::endl;
return 0.0;
}
float *fReuslt = (float*)malloc(sizeof(float));
float *fMatrix_Device; // pointer to device memory
int iMatrixSize = iRow * iCol; // number of matrix elements
HANDLE_ERROR(cudaMalloc((void**)&fMatrix_Device, iMatrixSize * sizeof(float))); // allocate device memory for the matrix
HANDLE_ERROR(cudaMemcpy(fMatrix_Device, fMatrix_Host, iMatrixSize * sizeof(float), cudaMemcpyHostToDevice)); // copy the data to device memory
// record the start time
cudaEvent_t start_GPU, end_GPU;
float elaspsedTime;
cudaEventCreate(&start_GPU);
cudaEventCreate(&end_GPU);
cudaEventRecord(start_GPU, 0);
for (int i = 1, iNum = iMatrixSize; i < iMatrixSize; i = i*BLOCK_SIZE)
{
int iBlockNum = (iNum + BLOCK_SIZE - 1) / BLOCK_SIZE;
reduce0<<<iBlockNum, BLOCK_SIZE>>>(fMatrix_Device, fMatrix_Device, iNum);
iNum = iBlockNum;
}
HANDLE_ERROR(cudaMemcpy(fReuslt, fMatrix_Device, sizeof(float), cudaMemcpyDeviceToHost)); // copy the result back to host memory
// stop timing
cudaEventRecord(end_GPU, 0);
cudaEventSynchronize(end_GPU);
cudaEventElapsedTime(&elaspsedTime, start_GPU, end_GPU);
cudaEventDestroy(start_GPU);
cudaEventDestroy(end_GPU);
std::cout << "Reduce0 的运行时间为:" << elaspsedTime << "ms." << std::endl;
HANDLE_ERROR(cudaFree(fMatrix_Device));// 释放显存空间
return fReuslt[0];
} |
4c8244cb3a8b74b5af01779a6efcd999619636a8.hip | // !!! This is a file automatically generated by hipify!!!
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "ppl/common/types.h"
#include "cudakernel/nn/conv/group_padding.h"
#include "cudakernel/common/divmod_fast.h"
#include "conv_common.h"
template<typename T>
__global__ void group_padding(T *output, T *input, uint64_t out_size, const int num_grp, const int num_chl_per_grp, const int num_chl_pad, int num_chl_per_grp_pad){
uint64_t out_off = blockIdx.x*blockDim.x + threadIdx.x;
// in this case, num_chl_per_grp is naturally not aligned with padding size,
// so we just use T to access memory.
T value = 0;
int chl_id_in_grp = out_off % (num_chl_per_grp_pad);// FIXME magic
uint64_t nhw_id = out_off / (num_chl_per_grp_pad * num_grp);// FIXME magic
int total_chl_id = out_off - nhw_id*num_chl_per_grp_pad*num_grp;
int grp_id = total_chl_id / num_chl_per_grp_pad;
uint64_t in_off = nhw_id*num_chl_pad + grp_id*num_chl_per_grp + chl_id_in_grp;
if(out_off < out_size){
if(chl_id_in_grp < num_chl_per_grp) value = input[in_off];
output[out_off] = value;
}
}
template<typename T>
__global__ void split_group(
T *output,
T *input,
DivModFast fast_div_channel,
uint64_t out_size,
const int num_grp,
const int num_chl_per_grp,
const int num_chl,
int num_chl_per_grp_pad)
{
int32_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
if (out_off >= out_size) return;
int32_t channel = fast_div_channel.mod(out_off);
bool in_range = channel < num_chl_per_grp;
int32_t nhw_id = out_off / (num_chl_per_grp_pad * num_grp);
int32_t grp_id = (fast_div_channel.div(out_off)) % num_grp;
int32_t in_off = nhw_id * num_chl + grp_id * num_chl_per_grp + channel;
T value = in_range ? input[in_off] : T(0);
output[out_off] = value;
}
template<typename T>
__global__ void merge_group(
T *output,
T *input,
DivModFast fast_div_channel,
uint64_t out_size,
const int num_grp,
const int num_chl_per_grp,
const int num_chl,
int num_chl_per_grp_pad,
int flt_align)
{
int32_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
if (out_off >= out_size) return;
int32_t channel = fast_div_channel.mod(out_off);
int32_t nhw_id = out_off / (flt_align);
int chl_id = out_off % flt_align;
int32_t grp_id = (fast_div_channel.div(out_off)) % num_grp;
int32_t in_off = nhw_id * num_grp * num_chl_per_grp_pad + grp_id * num_chl_per_grp_pad + channel;
output[out_off] = chl_id < num_chl ? input[in_off] : T(0);
}
template<typename T>
__global__ void flt_group_padding(T *output, T *input, unsigned int in_size_per_grp, const int num_grp,
int num_chl_per_grp_pad, unsigned int out_size_per_grp){
unsigned int in_off = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int grp_id = blockIdx.y;
bool in_range = (in_off < in_size_per_grp);
T value = in_range? input[in_off + grp_id*in_size_per_grp] : (T)0;
unsigned int c_id = in_off % num_chl_per_grp_pad;
unsigned int nhw_id = in_off / num_chl_per_grp_pad;
unsigned int out_off = nhw_id*num_chl_per_grp_pad + grp_id * out_size_per_grp + c_id;
if(in_range) output[out_off] = value;
}
void PPLCUDAConvolutionCvtFlt(
hipStream_t &stream,
void* output,
const void* input,
ppl::common::datatype_t type,
conv_param_t &conv_param)
{
const int flt_num = conv_param.num_flt;
const int num_chl = conv_param.num_chl;
const int flt_height = conv_param.flt_height;
const int flt_width = conv_param.flt_width;
const int num_grp = conv_param.num_grp;
int align_size = GetPadSize(type);
int num_chl_per_grp = num_chl / num_grp;
int num_chl_per_grp_pad = Align(num_chl_per_grp, align_size);
int num_flt_per_grp = flt_num / num_grp;
int num_flt_per_grp_pad = Align(num_flt_per_grp, align_size);
const int cta_size = 512;
dim3 grid;
int in_size_per_grp = flt_num/num_grp * flt_height * flt_width * num_chl_per_grp_pad;
int out_size_per_grp = num_flt_per_grp_pad * flt_height * flt_width * num_chl_per_grp_pad;
grid.x = DivUp(in_size_per_grp, cta_size);
grid.y = num_grp;
grid.z = 1;
if (type == ppl::common::DATATYPE_FLOAT32) {
hipMemset(output, 0, sizeof(float) * num_grp * out_size_per_grp);
hipLaunchKernelGGL(( flt_group_padding<float>), dim3(grid), dim3(cta_size), 0, stream, (float*)output, (float*)input, in_size_per_grp, num_grp, num_chl_per_grp_pad,
out_size_per_grp);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipMemset(output, 0, sizeof(half) * num_grp * out_size_per_grp);
hipLaunchKernelGGL(( flt_group_padding<__half>), dim3(grid), dim3(cta_size), 0, stream, (__half*)output, (__half*)input, in_size_per_grp, num_grp, num_chl_per_grp_pad,
out_size_per_grp);
}
}
void PPLCUDAConvolutionCvtInput(
hipStream_t &stream,
void* output,
const void* input,
ppl::common::datatype_t type,
conv_param_t &conv_param)
{
const int in_num = conv_param.in_num;
const int num_chl = conv_param.num_chl;
const int in_height = conv_param.in_height;
const int in_width = conv_param.in_width;
const int num_grp = conv_param.num_grp;
int align_size = GetPadSize(type);
int num_chl_per_grp = num_chl / num_grp;
int num_chl_per_grp_pad = Align(num_chl_per_grp, align_size);
const int cta_size = 512;
uint64_t out_size = in_num * in_height * in_width * num_chl_per_grp_pad * num_grp;
DivModFast fast_div_channel(num_chl_per_grp_pad);
dim3 grid(DivUp(out_size, cta_size), 1, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( split_group<float>), dim3(grid), dim3(cta_size), 0, stream, (float*)output, (float*)input, fast_div_channel,
out_size, num_grp, num_chl_per_grp, num_chl, num_chl_per_grp_pad);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( split_group<__half>), dim3(grid), dim3(cta_size), 0, stream, (__half*)output, (__half*)input, fast_div_channel,
out_size, num_grp, num_chl_per_grp, num_chl, num_chl_per_grp_pad);
}
}
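// Worked example of the padding arithmetic above (illustrative; the align_size value is an
// assumption here -- GetPadSize() comes from conv_common.h): with num_chl = 100, num_grp = 4
// and align_size = 8,
//   num_chl_per_grp     = 100 / 4      = 25
//   num_chl_per_grp_pad = Align(25, 8) = 32
// so split_group produces NHWC data with 4 * 32 = 128 channels per pixel, zero-filling
// channels 25..31 of every group.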
void PPLCUDAConvolutionCvtOutput(
hipStream_t &stream,
void* output,
const void* input,
ppl::common::datatype_t type,
conv_param_t &conv_param)
{
const int in_num = conv_param.in_num;
const int num_flt = conv_param.num_flt;
const int out_height = conv_param.out_height;
const int out_width = conv_param.out_width;
const int num_grp = conv_param.num_grp;
int align_size = GetPadSize(type);
int num_flt_per_grp = num_flt / num_grp;// FIXME magic
int num_flt_per_grp_pad = Align(num_flt_per_grp, align_size);
int flt_align = Align(num_flt, align_size);
const int cta_size = 512;
uint64_t out_size = in_num * out_height * out_width * flt_align;
DivModFast fast_div_channel(num_flt_per_grp);
dim3 grid(DivUp(out_size, cta_size), 1, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( merge_group<float>), dim3(grid), dim3(cta_size), 0, stream, (float*)output, (float*)input, fast_div_channel,
out_size, num_grp, num_flt_per_grp, num_flt, num_flt_per_grp_pad, flt_align);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( merge_group<__half>), dim3(grid), dim3(cta_size), 0, stream, (__half*)output, (__half*)input, fast_div_channel,
out_size, num_grp, num_flt_per_grp, num_flt, num_flt_per_grp_pad, flt_align);
}
}
void PPLCUDAConvolutionCvtBias(
hipStream_t &stream,
void* output,
const void* input,
ppl::common::datatype_t type,
conv_param_t &conv_param)
{
const int flt_num = conv_param.num_flt;
const int num_grp = conv_param.num_grp;
int align_size = GetPadSize(type);
int num_flt_per_grp = flt_num / num_grp;
int num_flt_per_grp_pad = Align(num_flt_per_grp, align_size);
const int cta_size = 256;
dim3 grid;
int out_size = num_flt_per_grp_pad*num_grp;
//int in_size = conv_param.num_flt_pad;
grid.x = DivUp(out_size, cta_size);
grid.y = 1;
grid.z = 1;
if (type == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( group_padding<float>), dim3(grid), dim3(cta_size), 0, stream,
(float*)output, (float*)input,
out_size, num_grp,
num_flt_per_grp, conv_param.num_flt_pad, num_flt_per_grp_pad);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( group_padding<__half>), dim3(grid), dim3(cta_size), 0, stream,
(__half*)output, (__half*)input,
out_size, num_grp,
num_flt_per_grp, conv_param.num_flt_pad, num_flt_per_grp_pad);
}
}
| 4c8244cb3a8b74b5af01779a6efcd999619636a8.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <cuda.h>
#include <cuda_fp16.h>
#include "ppl/common/types.h"
#include "cudakernel/nn/conv/group_padding.h"
#include "cudakernel/common/divmod_fast.h"
#include "conv_common.h"
template<typename T>
__global__ void group_padding(T *output, T *input, uint64_t out_size, const int num_grp, const int num_chl_per_grp, const int num_chl_pad, int num_chl_per_grp_pad){
uint64_t out_off = blockIdx.x*blockDim.x + threadIdx.x;
// in this case, num_chl_per_grp is naturally not aligned with padding size,
// so we just use T to access memory.
T value = 0;
int chl_id_in_grp = out_off % (num_chl_per_grp_pad);// FIXME magic
uint64_t nhw_id = out_off / (num_chl_per_grp_pad * num_grp);// FIXME magic
int total_chl_id = out_off - nhw_id*num_chl_per_grp_pad*num_grp;
int grp_id = total_chl_id / num_chl_per_grp_pad;
uint64_t in_off = nhw_id*num_chl_pad + grp_id*num_chl_per_grp + chl_id_in_grp;
if(out_off < out_size){
if(chl_id_in_grp < num_chl_per_grp) value = input[in_off];
output[out_off] = value;
}
}
template<typename T>
__global__ void split_group(
T *output,
T *input,
DivModFast fast_div_channel,
uint64_t out_size,
const int num_grp,
const int num_chl_per_grp,
const int num_chl,
int num_chl_per_grp_pad)
{
int32_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
if (out_off >= out_size) return;
int32_t channel = fast_div_channel.mod(out_off);
bool in_range = channel < num_chl_per_grp;
int32_t nhw_id = out_off / (num_chl_per_grp_pad * num_grp);
int32_t grp_id = (fast_div_channel.div(out_off)) % num_grp;
int32_t in_off = nhw_id * num_chl + grp_id * num_chl_per_grp + channel;
T value = in_range ? input[in_off] : T(0);
output[out_off] = value;
}
template<typename T>
__global__ void merge_group(
T *output,
T *input,
DivModFast fast_div_channel,
uint64_t out_size,
const int num_grp,
const int num_chl_per_grp,
const int num_chl,
int num_chl_per_grp_pad,
int flt_align)
{
int32_t out_off = blockIdx.x * blockDim.x + threadIdx.x;
if (out_off >= out_size) return;
int32_t channel = fast_div_channel.mod(out_off);
int32_t nhw_id = out_off / (flt_align);
int chl_id = out_off % flt_align;
int32_t grp_id = (fast_div_channel.div(out_off)) % num_grp;
int32_t in_off = nhw_id * num_grp * num_chl_per_grp_pad + grp_id * num_chl_per_grp_pad + channel;
output[out_off] = chl_id < num_chl ? input[in_off] : T(0);
}
template<typename T>
__global__ void flt_group_padding(T *output, T *input, unsigned int in_size_per_grp, const int num_grp,
int num_chl_per_grp_pad, unsigned int out_size_per_grp){
unsigned int in_off = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int grp_id = blockIdx.y;
bool in_range = (in_off < in_size_per_grp);
T value = in_range? input[in_off + grp_id*in_size_per_grp] : (T)0;
unsigned int c_id = in_off % num_chl_per_grp_pad;
unsigned int nhw_id = in_off / num_chl_per_grp_pad;
unsigned int out_off = nhw_id*num_chl_per_grp_pad + grp_id * out_size_per_grp + c_id;
if(in_range) output[out_off] = value;
}
void PPLCUDAConvolutionCvtFlt(
cudaStream_t &stream,
void* output,
const void* input,
ppl::common::datatype_t type,
conv_param_t &conv_param)
{
const int flt_num = conv_param.num_flt;
const int num_chl = conv_param.num_chl;
const int flt_height = conv_param.flt_height;
const int flt_width = conv_param.flt_width;
const int num_grp = conv_param.num_grp;
int align_size = GetPadSize(type);
int num_chl_per_grp = num_chl / num_grp;
int num_chl_per_grp_pad = Align(num_chl_per_grp, align_size);
int num_flt_per_grp = flt_num / num_grp;
int num_flt_per_grp_pad = Align(num_flt_per_grp, align_size);
const int cta_size = 512;
dim3 grid;
int in_size_per_grp = flt_num/num_grp * flt_height * flt_width * num_chl_per_grp_pad;
int out_size_per_grp = num_flt_per_grp_pad * flt_height * flt_width * num_chl_per_grp_pad;
grid.x = DivUp(in_size_per_grp, cta_size);
grid.y = num_grp;
grid.z = 1;
if (type == ppl::common::DATATYPE_FLOAT32) {
cudaMemset(output, 0, sizeof(float) * num_grp * out_size_per_grp);
flt_group_padding<float><<<grid, cta_size, 0, stream>>>((float*)output, (float*)input, in_size_per_grp, num_grp, num_chl_per_grp_pad,
out_size_per_grp);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
cudaMemset(output, 0, sizeof(half) * num_grp * out_size_per_grp);
flt_group_padding<__half><<<grid, cta_size, 0, stream>>>((__half*)output, (__half*)input, in_size_per_grp, num_grp, num_chl_per_grp_pad,
out_size_per_grp);
}
}
void PPLCUDAConvolutionCvtInput(
cudaStream_t &stream,
void* output,
const void* input,
ppl::common::datatype_t type,
conv_param_t &conv_param)
{
const int in_num = conv_param.in_num;
const int num_chl = conv_param.num_chl;
const int in_height = conv_param.in_height;
const int in_width = conv_param.in_width;
const int num_grp = conv_param.num_grp;
int align_size = GetPadSize(type);
int num_chl_per_grp = num_chl / num_grp;
int num_chl_per_grp_pad = Align(num_chl_per_grp, align_size);
const int cta_size = 512;
uint64_t out_size = in_num * in_height * in_width * num_chl_per_grp_pad * num_grp;
DivModFast fast_div_channel(num_chl_per_grp_pad);
dim3 grid(DivUp(out_size, cta_size), 1, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
split_group<float><<<grid, cta_size, 0, stream>>>((float*)output, (float*)input, fast_div_channel,
out_size, num_grp, num_chl_per_grp, num_chl, num_chl_per_grp_pad);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
split_group<__half><<<grid, cta_size, 0, stream>>>((__half*)output, (__half*)input, fast_div_channel,
out_size, num_grp, num_chl_per_grp, num_chl, num_chl_per_grp_pad);
}
}
void PPLCUDAConvolutionCvtOutput(
cudaStream_t &stream,
void* output,
const void* input,
ppl::common::datatype_t type,
conv_param_t &conv_param)
{
const int in_num = conv_param.in_num;
const int num_flt = conv_param.num_flt;
const int out_height = conv_param.out_height;
const int out_width = conv_param.out_width;
const int num_grp = conv_param.num_grp;
int align_size = GetPadSize(type);
int num_flt_per_grp = num_flt / num_grp;// FIXME magic
int num_flt_per_grp_pad = Align(num_flt_per_grp, align_size);
int flt_align = Align(num_flt, align_size);
const int cta_size = 512;
uint64_t out_size = in_num * out_height * out_width * flt_align;
DivModFast fast_div_channel(num_flt_per_grp);
dim3 grid(DivUp(out_size, cta_size), 1, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
merge_group<float><<<grid, cta_size, 0, stream>>>((float*)output, (float*)input, fast_div_channel,
out_size, num_grp, num_flt_per_grp, num_flt, num_flt_per_grp_pad, flt_align);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
merge_group<__half><<<grid, cta_size, 0, stream>>>((__half*)output, (__half*)input, fast_div_channel,
out_size, num_grp, num_flt_per_grp, num_flt, num_flt_per_grp_pad, flt_align);
}
}
void PPLCUDAConvolutionCvtBias(
cudaStream_t &stream,
void* output,
const void* input,
ppl::common::datatype_t type,
conv_param_t &conv_param)
{
const int flt_num = conv_param.num_flt;
const int num_grp = conv_param.num_grp;
int align_size = GetPadSize(type);
int num_flt_per_grp = flt_num / num_grp;
int num_flt_per_grp_pad = Align(num_flt_per_grp, align_size);
const int cta_size = 256;
dim3 grid;
int out_size = num_flt_per_grp_pad*num_grp;
//int in_size = conv_param.num_flt_pad;
grid.x = DivUp(out_size, cta_size);
grid.y = 1;
grid.z = 1;
if (type == ppl::common::DATATYPE_FLOAT32) {
group_padding<float><<<grid, cta_size, 0, stream>>>(
(float*)output, (float*)input,
out_size, num_grp,
num_flt_per_grp, conv_param.num_flt_pad, num_flt_per_grp_pad);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
group_padding<__half><<<grid, cta_size, 0, stream>>>(
(__half*)output, (__half*)input,
out_size, num_grp,
num_flt_per_grp, conv_param.num_flt_pad, num_flt_per_grp_pad);
}
}
|
bf1b4410ecfd4bc534a3b65ba819162fe7bcbf6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void createVertices(float4* positions, float time, unsigned int width, unsigned int height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// Calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u * freq + time) * cosf(v * freq + time) * 0.5f;
//write positions
positions[y*width + x] = make_float4(u, w, v, __int_as_float(0xff00ff00));
} | bf1b4410ecfd4bc534a3b65ba819162fe7bcbf6a.cu | __global__ void createVertices(float4* positions, float time, unsigned int width, unsigned int height)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// Calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
// Calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u * freq + time) * cosf(v * freq + time) * 0.5f;
//write positions
positions[y*width + x] = make_float4(u, w, v, __int_as_float(0xff00ff00));
} |
91b7250ddceab1530d74f6c2ae6c731345b8b30e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Refactor host function to run as CUDA kernel
*/
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addArraysInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *array, int N)
{
for(int i = 0; i < N; i++)
{
if(array[i] != target)
{
printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addArraysErr;
hipError_t asyncErr;
/*
* Launch kernels.
*/
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 3, a, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 4, b, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 0, c, N);
/*
* Now that initialization is happening on a GPU, host code
* must be synchronized to wait for its completion.
*/
hipDeviceSynchronize();
hipLaunchKernelGGL(( addArraysInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
addArraysErr = hipGetLastError();
if(addArraysErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addArraysErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| 91b7250ddceab1530d74f6c2ae6c731345b8b30e.cu | #include <stdio.h>
/*
* Refactor host function to run as CUDA kernel
*/
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addArraysInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
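// Note on the grid-stride loops above (worked with illustrative numbers): the launch below
// uses a fixed grid of 32 blocks per SM with 256 threads each, so on, say, a 20-SM device
// that is 163,840 threads in flight; each thread then strides through roughly
// (2<<24) / 163840 ~= 205 of the 33,554,432 elements, keeping accesses coalesced.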
void checkElementsAre(float target, float *array, int N)
{
for(int i = 0; i < N; i++)
{
if(array[i] != target)
{
printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addArraysErr;
cudaError_t asyncErr;
/*
* Launch kernels.
*/
initWith<<<numberOfBlocks, threadsPerBlock>>>(3, a, N);
initWith<<<numberOfBlocks, threadsPerBlock>>>(4, b, N);
initWith<<<numberOfBlocks, threadsPerBlock>>>(0, c, N);
/*
* Now that initialization is happening on a GPU, host code
* must be synchronized to wait for its completion.
*/
cudaDeviceSynchronize();
addArraysInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addArraysErr = cudaGetLastError();
if(addArraysErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addArraysErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
0564a8b5f8753f7b6563d1434014c827e83d1b8f.hip | // !!! This is a file automatically generated by hipify!!!
/*
@modifier : Nilanka Manoj
@compile : nvcc vecadd.cu -o build/vecadd
@run : ./build/vecadd <<n>>
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <hip/hip_runtime.h>
double *a, *b;
double *c, *c2;
__global__ void vecAdd(double *A, double *B, double *C, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) // the grid is rounded up, so trailing threads must not write out of bounds
C[i] = A[i] + B[i];
}
void vecAdd_h(double *A1, double *B1, double *C1, double N)
{
for (int i = 0; i < N; i++)
C1[i] = A1[i] + B1[i];
}
int main(int argc, char **argv)
{
if (argc == 2)
{
printf("=====================round strating==========================\n");
int n = atoi(argv[1]);
int nBytes = n * sizeof(double);
int block_size, block_no;
a = (double *)malloc(nBytes);
b = (double *)malloc(nBytes);
c = (double *)malloc(nBytes);
c2 = (double *)malloc(nBytes);
double *a_d, *b_d, *c_d;
block_size = 768;
block_no = (int)ceil(n / block_size) + 1;
for (int i = 0; i < n; i++)
{
a[i] = sin(i) * sin(i);
b[i] = cos(i) * cos(i);
c[i] = 0;
c2[i] = 0;
}
printf("Allocating device memory on host..\n");
hipMalloc((void **)&a_d, n * sizeof(double));
hipMalloc((void **)&b_d, n * sizeof(double));
hipMalloc((void **)&c_d, n * sizeof(double));
printf("Copying to device..\n");
hipMemcpy(a_d, a, n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(b_d, b, n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(c_d, c, n * sizeof(double), hipMemcpyHostToDevice);
printf("Doing GPU Vector add\n");
clock_t start_d = clock();
hipLaunchKernelGGL(( vecAdd), dim3(block_no), dim3(block_size), 0, 0, a_d, b_d, c_d, n);
hipDeviceSynchronize();
clock_t end_d = clock();
printf("Doing CPU Vector add\n");
clock_t start_h = clock();
vecAdd_h(a, b, c2, n);
clock_t end_h = clock();
double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
hipMemcpy(c, c_d, n * sizeof(double), hipMemcpyDeviceToHost);
printf("Number of elements: %d GPU Time: %f CPU Time: %f\n", n, time_d, time_h);
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
int e = memcmp(c, c2, n * sizeof(double)); // compare the full arrays, not just the first n bytes
printf("compared error : %d\n", e);
}
else
{
printf("invalid arguments\n");
}
return 0;
} | 0564a8b5f8753f7b6563d1434014c827e83d1b8f.cu | /*
@modifier : Nilanka Manoj
@compile : nvcc vecadd.cu -o build/vecadd
@run : ./build/vecadd <<n>>
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <cuda.h>
double *a, *b;
double *c, *c2;
__global__ void vecAdd(double *A, double *B, double *C, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) // the grid is rounded up, so trailing threads must not write out of bounds
C[i] = A[i] + B[i];
}
void vecAdd_h(double *A1, double *B1, double *C1, double N)
{
for (int i = 0; i < N; i++)
C1[i] = A1[i] + B1[i];
}
int main(int argc, char **argv)
{
if (argc == 2)
{
printf("=====================round strating==========================\n");
int n = atoi(argv[1]);
int nBytes = n * sizeof(double);
int block_size, block_no;
a = (double *)malloc(nBytes);
b = (double *)malloc(nBytes);
c = (double *)malloc(nBytes);
c2 = (double *)malloc(nBytes);
double *a_d, *b_d, *c_d;
block_size = 768;
block_no = (int)ceil(n / block_size) + 1;
for (int i = 0; i < n; i++)
{
a[i] = sin(i) * sin(i);
b[i] = cos(i) * cos(i);
c[i] = 0;
c2[i] = 0;
}
printf("Allocating device memory on host..\n");
cudaMalloc((void **)&a_d, n * sizeof(double));
cudaMalloc((void **)&b_d, n * sizeof(double));
cudaMalloc((void **)&c_d, n * sizeof(double));
printf("Copying to device..\n");
cudaMemcpy(a_d, a, n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c, n * sizeof(double), cudaMemcpyHostToDevice);
printf("Doing GPU Vector add\n");
clock_t start_d = clock();
vecAdd<<<block_no, block_size>>>(a_d, b_d, c_d, n);
cudaThreadSynchronize();
clock_t end_d = clock();
printf("Doing CPU Vector add\n");
clock_t start_h = clock();
vecAdd_h(a, b, c2, n);
clock_t end_h = clock();
double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
cudaMemcpy(c, c_d, n * sizeof(double), cudaMemcpyDeviceToHost);
printf("Number of elements: %d GPU Time: %f CPU Time: %f\n", n, time_d, time_h);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
int e = memcmp(c, c2, n * sizeof(double)); // compare the full arrays, not just the first n bytes
printf("compared error : %d\n", e);
}
else
{
printf("invalid arguments\n");
}
return 0;
} |
a3121c616c61daae6b01e0c0f8898c5c900ffa62.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 16
// Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// Buffers that represent the reshuffled versions of pos/vel1/vel2
glm::vec3 *dev_posCpy;
glm::vec3 *dev_vel1Cpy;
glm::vec3 *dev_vel2Cpy;
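// Illustrative sketch of the ping-pong idea (the kernel names and dt below are placeholders,
// not necessarily the ones defined later in this file): each step reads from dev_vel1, writes
// the new velocities to dev_vel2, and then the pointers are swapped so the freshly written
// buffer becomes the next step's input.
//   kernUpdateVelocity << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2);
//   kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
//   std::swap(dev_vel1, dev_vel2);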
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
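// Bit-mixing integer hash, used below to turn (particle index * time) into a pseudo-random RNG seed.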
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* This is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> > (1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// Computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// Additional buffers for scattered grid
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
// Additional buffers for coherent grid
hipMalloc((void**)&dev_posCpy, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_posCpy failed!");
hipMalloc((void**)&dev_vel1Cpy, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1Cpy failed!");
hipMalloc((void**)&dev_vel2Cpy, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2Cpy failed!");
// Wrap the key/value buffers around the thrust pointers
dev_thrust_particleArrayIndices = thrust::device_pointer_cast(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_pointer_cast(dev_particleGridIndices);
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 alignment(0.0f);
glm::vec3 separation(0.0f);
glm::vec3 cohesion(0.0f);
glm::vec3 deltaVel(0.0f);
float alignmentCount = 0.0f;
float cohesionCount = 0.0f;
for (int i = 0; i < N; i++) {
if (i == iSelf) {
continue;
}
glm::vec3 otherPos = pos[i];
float distance = glm::distance(pos[iSelf], otherPos);
// Rule 1 - Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance) {
cohesion += otherPos;
++cohesionCount;
}
// Rule 2 - Separation: boids try to stay a distance d away from each other
if (distance < rule2Distance) {
separation -= otherPos - pos[iSelf];
}
// Rule 3 - Alignment: boids try to match the speed of surrounding boids
if (distance < rule3Distance) {
alignment += vel[i];
++alignmentCount;
}
}
// Average out the cohesion velocity and scale it
if (cohesionCount > 0) {
cohesion /= cohesionCount;
cohesion = (cohesion - pos[iSelf]) * rule1Scale;
deltaVel += cohesion;
}
// Scale the separation velocity
separation *= rule2Scale;
deltaVel += separation;
// Average out the alignment velocity and scale it
if (alignmentCount > 0) {
alignment *= rule3Scale / alignmentCount;
deltaVel += alignment;
}
return deltaVel;
}
/**
 * For each of the `N` bodies, compute its new velocity from the positions and velocities of its neighbors.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// Compute new velocity and clamp it
glm::vec3 deltaVel = computeVelocityChange(N, index, pos, vel1);
glm::vec3 newVel = vel1[index] + deltaVel;
float newSpeed = glm::length(newVel);
newVel = newSpeed > maxSpeed ? glm::normalize(newVel) * maxSpeed : newVel;
// Record the new velocity into vel2. Question: why NOT vel1? Answer: because vel1 always contains
// the velocity of the previous frame update. After updating the current frame with the new velocity (vel2)
// we set vel1 = vel2, which is the entire purpose of ping-pong velocity
vel2[index] = newVel;
}
/**
* Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// Consider this method of computing a 1D index from a 3D grid index.
// Since memory is contiguous, iterate so that x varies fastest, then y, then z
// (x advances one element at a time in memory).
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
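// Example (hypothetical numbers): with gridResolution = 10, cell (x = 2, y = 3, z = 4)
// maps to 2 + 3 * 10 + 4 * 100 = 432.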
__global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
glm::vec3 offsetPos = pos[index] + glm::vec3(scene_scale); // all boids are now in [0, 2 * scene_scale], makes indexing easier
offsetPos *= inverseCellWidth; // how many cell width is each vector component
glm::ivec3 cellIndex(glm::floor(offsetPos));
gridIndices[index] = gridIndex3Dto1D(cellIndex.x, cellIndex.y, cellIndex.z, gridResolution);
// ith boid has its data in ith position in pos/vel1/vel2 arrays (trivially, but important since we will sort)
indices[index] = index;
}
}
// Consider how this could be useful for indicating that a cell
// does not enclose any boids
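// (The neighbor-search kernels below treat a start/end entry that is still -1 after the reset
// as "this cell is empty" and skip it.)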
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
/* This kernel is called after sorting is done, which means that a threadId will represent some boid
and not necessarily the threadIdth boid (because it's a permutation)
*/
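/* Example (hypothetical sorted input): particleGridIndices = [2, 2, 2, 5, 5, 7] produces
   gridCellStartIndices[2] = 0, gridCellEndIndices[2] = 2,
   gridCellStartIndices[5] = 3, gridCellEndIndices[5] = 4,
   gridCellStartIndices[7] = 5, gridCellEndIndices[7] = 5; every other cell keeps its reset value of -1. */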
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices,
int *gridCellEndIndices) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x; // each index represents some boid
if (i > 0 && i < N) {
int cell = particleGridIndices[i]; // The cell of the ith boid
int prevCell = particleGridIndices[i - 1]; // The cell of the i - 1th boid
if (prevCell != cell) {
// If the cells are not the same, then we have a new cell! (and end the previous one)
gridCellStartIndices[cell] = i;
gridCellEndIndices[prevCell] = i - 1;
}
} else if (i == 0) {
gridCellStartIndices[particleGridIndices[0]] = 0;
}
if (i == N - 1) {
gridCellEndIndices[particleGridIndices[N-1]] = N - 1;
}
}
/* This determines which octant of the cell the boid is in. E.g if x coord > 0.5f, then the boid is in the right half
of the cell, and the same logic applies to y and z to determine an octant. An octant is represented by a vec3
(direction). I use the vec3 to compute the offset from the original cellIndex to get the other 7 neighbors
*/
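// Example (hypothetical values): a boid at fractional offset (0.2, 0.7, 0.9) inside its cell searches
// toward -x, +y, +z, i.e. its own cell plus the 7 cells offset by those signs; any neighbor index
// outside [cellIndexMin, cellIndexMax] is marked -1 and skipped.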
__device__ void findCellNeighbors(int* outNeighborGrids, glm::vec3 offsetPos, int gridResolution, int cellIndex, int cellIndexMin, int cellIndexMax) {
glm::ivec3 direction(1, 1, 1);
offsetPos -= glm::floor(offsetPos);
if (offsetPos.x < 0.5f) {
direction.x = -1;
}
if (offsetPos.y < 0.5f) {
direction.y = -1;
}
if (offsetPos.z < 0.5f) {
direction.z = -1;
}
// Neighbors are ordered at a different order (from lowest index to highest index)
outNeighborGrids[0] = cellIndex;
outNeighborGrids[1] = cellIndex + direction.x;
outNeighborGrids[2] = cellIndex + direction.y * gridResolution;
outNeighborGrids[3] = cellIndex + direction.x + direction.y * gridResolution;
outNeighborGrids[4] = cellIndex + direction.z * gridResolution * gridResolution;
outNeighborGrids[5] = cellIndex + direction.x + direction.z * gridResolution * gridResolution;
outNeighborGrids[6] = cellIndex + direction.y * gridResolution + direction.z * gridResolution * gridResolution;
outNeighborGrids[7] = cellIndex + direction.x + direction.y * gridResolution + direction.z * gridResolution * gridResolution;
for (int i = 0; i < 8; i++) {
if (outNeighborGrids[i] > cellIndexMax || outNeighborGrids[i] < cellIndexMin) {
outNeighborGrids[i] = -1;
}
}
return;
}
__global__ void kernUpdateVelNeighborSearchScattered(int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[particleArrayIndices[index]] + glm::vec3(scene_scale);
thisPos *= inverseCellWidth;
glm::ivec3 gridIndex(glm::floor(thisPos));
int gridCell = gridIndex3Dto1D(gridIndex.x, gridIndex.y, gridIndex.z, gridResolution);
// Identify neighboring cells
int maxCell = gridIndex3Dto1D(gridResolution - 1, gridResolution - 1, gridResolution - 1, gridResolution);
int minCell = 0;
int neighbors[8];
findCellNeighbors(neighbors, thisPos, gridResolution, gridCell, minCell, maxCell);
// Compute delta vel
glm::vec3 alignment(0.0f);
glm::vec3 separation(0.0f);
glm::vec3 cohesion(0.0f);
glm::vec3 deltaVel(0.0f);
int alignmentCount = 0;
int cohesionCount = 0;
for (int i = 0; i < 8; i++) {
int neighborIndex = neighbors[i];
if (neighborIndex != -1) {
int start = gridCellStartIndices[neighborIndex];
int end = gridCellEndIndices[neighborIndex];
if (start != -1 && end != -1) {
for (int j = start; j <= end; j++) {
if (j != index) {
glm::vec3 otherPos = pos[particleArrayIndices[j]];
float distance = glm::length(pos[particleArrayIndices[index]] - otherPos);
// Rule 1 - Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance) {
cohesion += otherPos;
++cohesionCount;
}
// Rule 2 - Separation: boids try to stay a distance d away from each other
if (distance < rule2Distance) {
separation -= otherPos - pos[particleArrayIndices[index]];
}
// Rule 3 - Alignment: boids try to match the speed of surrounding boids
if (distance < rule3Distance) {
alignment += vel1[particleArrayIndices[j]];
++alignmentCount;
}
}
}
}
}
}
// Average out the cohesion velocity and scale it
if (cohesionCount > 0) {
cohesion /= cohesionCount;
cohesion = (cohesion - pos[particleArrayIndices[index]]) * rule1Scale;
deltaVel += cohesion;
}
// Scale the separation velocity
separation *= rule2Scale;
deltaVel += separation;
// Average out the alignment velocity and scale it
if (alignmentCount > 0) {
alignment *= rule3Scale / alignmentCount;
deltaVel += alignment;
}
glm::vec3 newVel = vel1[particleArrayIndices[index]] + deltaVel;
float newSpeed = glm::length(newVel);
newVel = newSpeed > maxSpeed ? (newVel / newSpeed) * maxSpeed : newVel;
vel2[particleArrayIndices[index]] = newVel;
}
/* We need 1 additional buffer for each data array to avoid synchronization issues and memory access issues since
we are overwriting data */
__global__ void kernShuffleDataArrays(int N, int* indices, glm::vec3* pos, glm::vec3* posCpy,
  glm::vec3* vel1, glm::vec3* vel1Cpy, glm::vec3* vel2, glm::vec3* vel2Cpy) {
  int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (threadId >= N) {
    return;
  }
  // Remap each data entry at index provided by the indices array to index threadId
  posCpy[threadId] = pos[indices[threadId]];
  vel1Cpy[threadId] = vel1[indices[threadId]];
  vel2Cpy[threadId] = vel2[indices[threadId]];
}
__global__ void kernUpdateVelNeighborSearchCoherent(int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index] + glm::vec3(scene_scale);
thisPos *= inverseCellWidth;
glm::ivec3 gridIndex(glm::floor(thisPos));
int gridCell = gridIndex3Dto1D(gridIndex.x, gridIndex.y, gridIndex.z, gridResolution);
// Identify neighboring cells
int maxCell = gridIndex3Dto1D(gridResolution - 1, gridResolution - 1, gridResolution - 1, gridResolution);
int minCell = 0;
int neighbors[8];
findCellNeighbors(neighbors, thisPos, gridResolution, gridCell, minCell, maxCell);
// Compute delta vel
glm::vec3 alignment(0.0f);
glm::vec3 separation(0.0f);
glm::vec3 cohesion(0.0f);
glm::vec3 deltaVel(0.0f);
int alignmentCount = 0;
int cohesionCount = 0;
for (int i = 0; i < 8; i++) {
int neighborIndex = neighbors[i];
if (neighborIndex != -1) {
int start = gridCellStartIndices[neighborIndex];
int end = gridCellEndIndices[neighborIndex];
if (start != -1 && end != -1) {
for (int j = start; j <= end; j++) {
if (j != index) {
glm::vec3 otherPos = pos[j];
float distance = glm::length(pos[index] - otherPos);
// Rule 1 - Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance) {
cohesion += otherPos;
++cohesionCount;
}
// Rule 2 - Separation: boids try to stay a distance d away from each other
if (distance < rule2Distance) {
separation -= otherPos - pos[index];
}
// Rule 3 - Alignment: boids try to match the speed of surrounding boids
if (distance < rule3Distance) {
alignment += vel1[j];
++alignmentCount;
}
}
}
}
}
}
// Average out the cohesion velocity and scale it
if (cohesionCount > 0) {
cohesion /= cohesionCount;
cohesion = (cohesion - pos[index]) * rule1Scale;
deltaVel += cohesion;
}
// Scale the separation velocity
separation *= rule2Scale;
deltaVel += separation;
// Average out the alignment velocity and scale it
if (alignmentCount > 0) {
alignment *= rule3Scale / alignmentCount;
deltaVel += alignment;
}
glm::vec3 newVel = vel1[index] + deltaVel;
float newSpeed = glm::length(newVel);
newVel = newSpeed > maxSpeed ? (newVel / newSpeed) * maxSpeed : newVel;
vel2[index] = newVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Compute new velocity (vel2)
kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, dev_vel1, dev_vel2);
// Update position of boids based on new velocity (vel2)
kernUpdatePos << <fullBlocksPerGrid, blockSize>> >(numObjects, dt, dev_pos, dev_vel2);
// Ping-pong the velocity buffers (from vel2 to vel1)
glm::vec3* temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
//hipMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// Compute grid indices & array indices
dim3 fullBlocksPerGridObject((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGridObject, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// Unstable sort keys (grid indices) & values (array indices)
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// Reset start & end buffers to indicate that cells that we won't traverse do not contain boids
dim3 fullBlocksPerGridCellCount((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <fullBlocksPerGridCellCount, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << <fullBlocksPerGridCellCount, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
// Naively traverse the sorted key-value pairs and identify the start and end of cells
kernIdentifyCellStartEnd << <fullBlocksPerGridObject, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// Compute new velocity (vel2)
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGridObject, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
// Update position of boids based on new velocity (vel2)
kernUpdatePos << <fullBlocksPerGridObject, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
// Ping-pong the velocity buffers (from vel2 to vel1)
hipMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// Compute grid indices & array indices
dim3 fullBlocksPerGridObject((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGridObject, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// Unstable sort keys (grid indices) & values (array indices)
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// Reset start & end buffers to indicate that cells that we won't traverse do not contain boids
dim3 fullBlocksPerGridCellCount((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <fullBlocksPerGridCellCount, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << <fullBlocksPerGridCellCount, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
// Naively traverse the sorted key-value pairs and identify the start and end of cells
kernIdentifyCellStartEnd << <fullBlocksPerGridObject, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// Reshuffle data arrays
kernShuffleDataArrays << <fullBlocksPerGridObject, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_posCpy, dev_vel1, dev_vel1Cpy, dev_vel2, dev_vel2Cpy);
hipMemcpy(dev_pos, dev_posCpy, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
hipMemcpy(dev_vel1, dev_vel1Cpy, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
hipMemcpy(dev_vel2, dev_vel2Cpy, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
// Update velocity
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGridObject, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2);
// Update position of boids based on new velocity (vel2)
kernUpdatePos << <fullBlocksPerGridObject, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
// Ping-pong the velocity buffers (from vel2 to vel1)
hipMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, hipMemcpyDeviceToDevice);
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(dev_vel1Cpy);
hipFree(dev_vel2Cpy);
hipFree(dev_posCpy);
}
void Boids::unitTest() {
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| a3121c616c61daae6b01e0c0f8898c5c900ffa62.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 16
// Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// Buffers that represent the reshuffled versions of pos/vel1/vel2
glm::vec3 *dev_posCpy;
glm::vec3 *dev_vel1Cpy;
glm::vec3 *dev_vel2Cpy;
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* This is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> > (1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// Computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// Additional buffers for scattered grid
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
// Additional buffers for coherent grid
cudaMalloc((void**)&dev_posCpy, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_posCpy failed!");
cudaMalloc((void**)&dev_vel1Cpy, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1Cpy failed!");
cudaMalloc((void**)&dev_vel2Cpy, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2Cpy failed!");
// Wrap the key/value buffers around the thrust pointers
dev_thrust_particleArrayIndices = thrust::device_pointer_cast(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_pointer_cast(dev_particleGridIndices);
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 alignment(0.0f);
glm::vec3 separation(0.0f);
glm::vec3 cohesion(0.0f);
glm::vec3 deltaVel(0.0f);
float alignmentCount = 0.0f;
float cohesionCount = 0.0f;
for (int i = 0; i < N; i++) {
if (i == iSelf) {
continue;
}
glm::vec3 otherPos = pos[i];
float distance = glm::distance(pos[iSelf], otherPos);
// Rule 1 - Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance) {
cohesion += otherPos;
++cohesionCount;
}
// Rule 2 - Separation: boids try to stay a distance d away from each other
if (distance < rule2Distance) {
separation -= otherPos - pos[iSelf];
}
// Rule 3 - Alignment: boids try to match the speed of surrounding boids
if (distance < rule3Distance) {
alignment += vel[i];
++alignmentCount;
}
}
// Average out the cohesion velocity and scale it
if (cohesionCount > 0) {
cohesion /= cohesionCount;
cohesion = (cohesion - pos[iSelf]) * rule1Scale;
deltaVel += cohesion;
}
// Scale the separation velocity
separation *= rule2Scale;
deltaVel += separation;
// Average out the alignment velocity and scale it
if (alignmentCount > 0) {
alignment *= rule3Scale / alignmentCount;
deltaVel += alignment;
}
return deltaVel;
}
/**
 * For each of the `N` bodies, compute its new velocity from the positions and velocities of its neighbors.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// Compute new velocity and clamp it
glm::vec3 deltaVel = computeVelocityChange(N, index, pos, vel1);
glm::vec3 newVel = vel1[index] + deltaVel;
float newSpeed = glm::length(newVel);
newVel = newSpeed > maxSpeed ? glm::normalize(newVel) * maxSpeed : newVel;
// Record the new velocity into vel2. Question: why NOT vel1? Answer: because vel1 always contains
// the velocity of the previous frame update. After updating the current frame with the new velocity (vel2)
// we set vel1 = vel2, which is the entire purpose of ping-pong velocity
vel2[index] = newVel;
}
/**
* Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// Consider this method of computing a 1D index from a 3D grid index.
// Since memory is contiguous, iterate so that x varies fastest, then y, then z
// (x advances one element at a time in memory).
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution, glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
glm::vec3 offsetPos = pos[index] + glm::vec3(scene_scale); // all boids are now in [0, 2 * scene_scale], makes indexing easier
offsetPos *= inverseCellWidth; // how many cell width is each vector component
glm::ivec3 cellIndex(glm::floor(offsetPos));
gridIndices[index] = gridIndex3Dto1D(cellIndex.x, cellIndex.y, cellIndex.z, gridResolution);
// ith boid has its data in ith position in pos/vel1/vel2 arrays (trivially, but important since we will sort)
indices[index] = index;
}
}
// Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
/* This kernel is called after sorting is done, which means that a threadId will represent some boid
and not necessarily the threadIdth boid (because it's a permutation)
*/
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices,
int *gridCellEndIndices) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x; // each index represents some boid
if (i > 0 && i < N) {
int cell = particleGridIndices[i]; // The cell of the ith boid
int prevCell = particleGridIndices[i - 1]; // The cell of the i - 1th boid
if (prevCell != cell) {
// If the cells are not the same, then we have a new cell! (and end the previous one)
gridCellStartIndices[cell] = i;
gridCellEndIndices[prevCell] = i - 1;
}
} else if (i == 0) {
gridCellStartIndices[particleGridIndices[0]] = 0;
}
if (i == N - 1) {
gridCellEndIndices[particleGridIndices[N-1]] = N - 1;
}
}
/* This determines which octant of the cell the boid is in. E.g if x coord > 0.5f, then the boid is in the right half
of the cell, and the same logic applies to y and z to determine an octant. An octant is represented by a vec3
(direction). I use the vec3 to compute the offset from the original cellIndex to get the other 7 neighbors
*/
__device__ void findCellNeighbors(int* outNeighborGrids, glm::vec3 offsetPos, int gridResolution, int cellIndex, int cellIndexMin, int cellIndexMax) {
glm::ivec3 direction(1, 1, 1);
offsetPos -= glm::floor(offsetPos);
if (offsetPos.x < 0.5f) {
direction.x = -1;
}
if (offsetPos.y < 0.5f) {
direction.y = -1;
}
if (offsetPos.z < 0.5f) {
direction.z = -1;
}
// Neighbors are ordered at a different order (from lowest index to highest index)
outNeighborGrids[0] = cellIndex;
outNeighborGrids[1] = cellIndex + direction.x;
outNeighborGrids[2] = cellIndex + direction.y * gridResolution;
outNeighborGrids[3] = cellIndex + direction.x + direction.y * gridResolution;
outNeighborGrids[4] = cellIndex + direction.z * gridResolution * gridResolution;
outNeighborGrids[5] = cellIndex + direction.x + direction.z * gridResolution * gridResolution;
outNeighborGrids[6] = cellIndex + direction.y * gridResolution + direction.z * gridResolution * gridResolution;
outNeighborGrids[7] = cellIndex + direction.x + direction.y * gridResolution + direction.z * gridResolution * gridResolution;
for (int i = 0; i < 8; i++) {
if (outNeighborGrids[i] > cellIndexMax || outNeighborGrids[i] < cellIndexMin) {
outNeighborGrids[i] = -1;
}
}
return;
}
__global__ void kernUpdateVelNeighborSearchScattered(int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[particleArrayIndices[index]] + glm::vec3(scene_scale);
thisPos *= inverseCellWidth;
glm::ivec3 gridIndex(glm::floor(thisPos));
int gridCell = gridIndex3Dto1D(gridIndex.x, gridIndex.y, gridIndex.z, gridResolution);
// Identify neighboring cells
int maxCell = gridIndex3Dto1D(gridResolution - 1, gridResolution - 1, gridResolution - 1, gridResolution);
int minCell = 0;
int neighbors[8];
findCellNeighbors(neighbors, thisPos, gridResolution, gridCell, minCell, maxCell);
// Compute delta vel
glm::vec3 alignment(0.0f);
glm::vec3 separation(0.0f);
glm::vec3 cohesion(0.0f);
glm::vec3 deltaVel(0.0f);
int alignmentCount = 0;
int cohesionCount = 0;
for (int i = 0; i < 8; i++) {
int neighborIndex = neighbors[i];
if (neighborIndex != -1) {
int start = gridCellStartIndices[neighborIndex];
int end = gridCellEndIndices[neighborIndex];
if (start != -1 && end != -1) {
for (int j = start; j <= end; j++) {
if (j != index) {
glm::vec3 otherPos = pos[particleArrayIndices[j]];
float distance = glm::length(pos[particleArrayIndices[index]] - otherPos);
// Rule 1 - Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance) {
cohesion += otherPos;
++cohesionCount;
}
// Rule 2 - Separation: boids try to stay a distance d away from each other
if (distance < rule2Distance) {
separation -= otherPos - pos[particleArrayIndices[index]];
}
// Rule 3 - Alignment: boids try to match the speed of surrounding boids
if (distance < rule3Distance) {
alignment += vel1[particleArrayIndices[j]];
++alignmentCount;
}
}
}
}
}
}
// Average out the cohesion velocity and scale it
if (cohesionCount > 0) {
cohesion /= cohesionCount;
cohesion = (cohesion - pos[particleArrayIndices[index]]) * rule1Scale;
deltaVel += cohesion;
}
// Scale the separation velocity
separation *= rule2Scale;
deltaVel += separation;
// Average out the alignment velocity and scale it
if (alignmentCount > 0) {
alignment *= rule3Scale / alignmentCount;
deltaVel += alignment;
}
glm::vec3 newVel = vel1[particleArrayIndices[index]] + deltaVel;
float newSpeed = glm::length(newVel);
newVel = newSpeed > maxSpeed ? (newVel / newSpeed) * maxSpeed : newVel;
vel2[particleArrayIndices[index]] = newVel;
}
/* We need 1 additional buffer for each data array to avoid synchronization issues and memory access issues since
we are overwriting data */
__global__ void kernShuffleDataArrays(int N, int* indices, glm::vec3* pos, glm::vec3* posCpy,
  glm::vec3* vel1, glm::vec3* vel1Cpy, glm::vec3* vel2, glm::vec3* vel2Cpy) {
  int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (threadId >= N) {
    return;
  }
  // Remap each data entry at index provided by the indices array to index threadId
  posCpy[threadId] = pos[indices[threadId]];
  vel1Cpy[threadId] = vel1[indices[threadId]];
  vel2Cpy[threadId] = vel2[indices[threadId]];
}
__global__ void kernUpdateVelNeighborSearchCoherent(int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index] + glm::vec3(scene_scale);
thisPos *= inverseCellWidth;
glm::ivec3 gridIndex(glm::floor(thisPos));
int gridCell = gridIndex3Dto1D(gridIndex.x, gridIndex.y, gridIndex.z, gridResolution);
// Identify neighboring cells
int maxCell = gridIndex3Dto1D(gridResolution - 1, gridResolution - 1, gridResolution - 1, gridResolution);
int minCell = 0;
int neighbors[8];
findCellNeighbors(neighbors, thisPos, gridResolution, gridCell, minCell, maxCell);
// Compute delta vel
glm::vec3 alignment(0.0f);
glm::vec3 separation(0.0f);
glm::vec3 cohesion(0.0f);
glm::vec3 deltaVel(0.0f);
int alignmentCount = 0;
int cohesionCount = 0;
for (int i = 0; i < 8; i++) {
int neighborIndex = neighbors[i];
if (neighborIndex != -1) {
int start = gridCellStartIndices[neighborIndex];
int end = gridCellEndIndices[neighborIndex];
if (start != -1 && end != -1) {
for (int j = start; j <= end; j++) {
if (j != index) {
glm::vec3 otherPos = pos[j];
float distance = glm::length(pos[index] - otherPos);
// Rule 1 - Cohesion: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance) {
cohesion += otherPos;
++cohesionCount;
}
// Rule 2 - Separation: boids try to stay a distance d away from each other
if (distance < rule2Distance) {
separation -= otherPos - pos[index];
}
// Rule 3 - Alignment: boids try to match the speed of surrounding boids
if (distance < rule3Distance) {
alignment += vel1[j];
++alignmentCount;
}
}
}
}
}
}
// Average out the cohesion velocity and scale it
if (cohesionCount > 0) {
cohesion /= cohesionCount;
cohesion = (cohesion - pos[index]) * rule1Scale;
deltaVel += cohesion;
}
// Scale the separation velocity
separation *= rule2Scale;
deltaVel += separation;
// Average out the alignment velocity and scale it
if (alignmentCount > 0) {
alignment *= rule3Scale / alignmentCount;
deltaVel += alignment;
}
glm::vec3 newVel = vel1[index] + deltaVel;
float newSpeed = glm::length(newVel);
newVel = newSpeed > maxSpeed ? (newVel / newSpeed) * maxSpeed : newVel;
vel2[index] = newVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Compute new velocity (vel2)
kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, dev_vel1, dev_vel2);
// Update position of boids based on new velocity (vel2)
kernUpdatePos << <fullBlocksPerGrid, blockSize>> >(numObjects, dt, dev_pos, dev_vel2);
// Ping-pong the velocity buffers (from vel2 to vel1)
glm::vec3* temp = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = temp;
//cudaMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// Compute grid indices & array indices
dim3 fullBlocksPerGridObject((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGridObject, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// Unstable sort keys (grid indices) & values (array indices)
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// Reset start & end buffers to indicate that cells that we won't traverse do not contain boids
dim3 fullBlocksPerGridCellCount((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <fullBlocksPerGridCellCount, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << <fullBlocksPerGridCellCount, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
// Naively traverse the sorted key-value pairs and identify the start and end of cells
kernIdentifyCellStartEnd << <fullBlocksPerGridObject, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// Compute new velocity (vel2)
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGridObject, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
// Update position of boids based on new velocity (vel2)
kernUpdatePos << <fullBlocksPerGridObject, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
// Ping-pong the velocity buffers (from vel2 to vel1)
cudaMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// Compute grid indices & array indices
dim3 fullBlocksPerGridObject((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGridObject, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
// Unstable sort keys (grid indices) & values (array indices)
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
// Reset start & end buffers to indicate that cells that we won't traverse do not contain boids
dim3 fullBlocksPerGridCellCount((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer << <fullBlocksPerGridCellCount, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
kernResetIntBuffer << <fullBlocksPerGridCellCount, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
// Naively traverse the sorted key-value pairs and identify the start and end of cells
kernIdentifyCellStartEnd << <fullBlocksPerGridObject, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
// Reshuffle data arrays
kernShuffleDataArrays << <fullBlocksPerGridObject, blockSize >> > (numObjects, dev_particleArrayIndices, dev_pos, dev_posCpy, dev_vel1, dev_vel1Cpy, dev_vel2, dev_vel2Cpy);
cudaMemcpy(dev_pos, dev_posCpy, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vel1, dev_vel1Cpy, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vel2, dev_vel2Cpy, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
// Update velocity
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGridObject, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos, dev_vel1, dev_vel2);
// Update position of boids based on new velocity (vel2)
kernUpdatePos << <fullBlocksPerGridObject, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
// Ping-pong the velocity buffers (from vel2 to vel1)
cudaMemcpy(dev_vel1, dev_vel2, sizeof(glm::vec3) * numObjects, cudaMemcpyDeviceToDevice);
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(dev_vel1Cpy);
cudaFree(dev_vel2Cpy);
cudaFree(dev_posCpy);
}
void Boids::unitTest() {
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
3da2451d6f3489fd63f056f1e38066bb518691d0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include "../src/cuspreadinterp.h"
#include "../contrib/utils.h"
using namespace std;
int main(int argc, char* argv[])
{
int nf1, nf2, nf3;
FLT sigma = 2.0;
int N1, N2, N3, M;
if (argc<5) {
fprintf(stderr,
"Usage: interp3d method nupts_distr nf1 nf2 nf3 [M [tol [sort]]]\n"
"Arguments:\n"
" method: One of\n"
" 1: nupts driven, or\n"
" 2: sub-problem.\n"
" nupts_distr: The distribution of the points; one of\n"
" 0: uniform, or\n"
" 1: concentrated in a small region.\n"
" nf1, nf2, nf3: The size of the 3D array.\n"
" M: The number of non-uniform points (default nf1 * nf2 * nf3 / 8).\n"
" tol: NUFFT tolerance (default 1e-6).\n"
" sort: One of\n"
" 0: do not sort the points, or\n"
" 1: sort the points (default).\n");
return 1;
}
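// Example invocation (hypothetical sizes): "interp3d 1 1 256 256 256" runs the nupts-driven method
// on a uniform distribution with nf1 = nf2 = nf3 = 256 and the default M = nf1*nf2*nf3/8 points.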
double w;
int method;
sscanf(argv[1],"%d",&method);
int nupts_distribute;
sscanf(argv[2],"%d",&nupts_distribute);
sscanf(argv[3],"%lf",&w); nf1 = (int)w; // so can read 1e6 right!
sscanf(argv[4],"%lf",&w); nf2 = (int)w; // so can read 1e6 right!
sscanf(argv[5],"%lf",&w); nf3 = (int)w; // so can read 1e6 right!
N1 = (int) nf1/sigma;
N2 = (int) nf2/sigma;
N3 = (int) nf3/sigma;
M = N1*N2*N3;// let density always be 1
if(argc>6){
sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right!
if(M == 0) M=N1*N2*N3;
}
FLT tol=1e-6;
if(argc>7){
sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right!
}
int sort=1;
if(argc>8){
sscanf(argv[8],"%d",&sort);
}
int ier;
int ns=::ceil(-log10(tol/10.0));
int maxsubprobsize;
cout<<scientific<<setprecision(3);
FLT *x, *y, *z;
CPX *c, *fw;
hipHostMalloc(&x, M*sizeof(FLT));
hipHostMalloc(&y, M*sizeof(FLT));
hipHostMalloc(&z, M*sizeof(FLT));
hipHostMalloc(&c, M*sizeof(CPX));
hipHostMalloc(&fw,nf1*nf2*nf3*sizeof(CPX));
switch(nupts_distribute){
// Making data
case 1: //uniform
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*randm11(), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*randm11(), nf2, 1);
z[i] = RESCALE(M_PI*randm11(), nf3, 1);
//cout << x[i] << "," << y[i] << "," << z[i] << endl;
}
maxsubprobsize = 65536;
}
break;
case 2: // concentrate on a small region
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*rand01()/(nf1*2/32), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*rand01()/(nf1*2/32), nf2, 1);
z[i] = RESCALE(M_PI*rand01()/(nf3*2/32), nf3, 1);
}
maxsubprobsize = 1024;
}
break;
default:
cerr<<"error: nupts distr should be 1,2" << endl;
return 1;
}
for(int i=0; i<nf1*nf2*nf3; i++){
fw[i].real(1.0);
fw[i].imag(0.0);
}
int dim=3;
cufinufft_plan dplan;
ier = cufinufft_default_opts(2, dim, &dplan.opts);
if(ier != 0 ){
cout<<"error: cufinufft_default_opts"<<endl;
return 0;
}
ier = setup_spreader_for_nufft(dplan.spopts, tol, dplan.opts);
dplan.opts.upsampfac=sigma;
dplan.opts.gpu_method=method;
dplan.opts.gpu_kerevalmeth=1;
dplan.opts.gpu_sort=sort;
dplan.spopts.pirange=0;
if(dplan.opts.gpu_method == 2)
{
dplan.opts.gpu_binsizex=16;
dplan.opts.gpu_binsizey=16;
dplan.opts.gpu_binsizez=2;
dplan.opts.gpu_maxsubprobsize=maxsubprobsize;
}
if(dplan.opts.gpu_method == 1)
{
dplan.opts.gpu_binsizex=16;
dplan.opts.gpu_binsizey=8;
dplan.opts.gpu_binsizez=4;
}
CNTime timer;
/*warm up gpu*/
char *a;
timer.restart();
checkCudaErrors(hipMalloc(&a,1));
#ifdef TIME
cout<<"[time ]"<< " (warm up) First cudamalloc call " << timer.elapsedsec() <<" s"<<endl<<endl;
#endif
#ifdef INFO
cout<<"[info ] Interpolating ["<<nf1<<"x"<<nf2<<"x"<<nf3<<
"] uniform points to "<<M<<"nupts"<<endl;
#endif
timer.restart();
ier = cufinufft_interp3d(N1, N2, N3, nf1, nf2, nf3, fw, M, x, y, z, c, tol,
&dplan);
if(ier != 0 ){
cout<<"error: cnufftinterp3d"<<endl;
return 0;
}
FLT t=timer.elapsedsec();
printf("[Method %d] %ld U pts to #%d NU pts in %.3g s (\t%.3g U pts/s)\n",
dplan.opts.gpu_method,nf1*nf2*nf3,M,t,M/t);
#ifdef RESULT
cout<<"[result-input]"<<endl;
for(int j=0; j<10; j++){
printf(" (%2.3g,%2.3g)",c[j].real(),c[j].imag() );
cout<<endl;
}
cout<<endl;
#endif
hipHostFree(x);
hipHostFree(y);
hipHostFree(z);
hipHostFree(c);
hipHostFree(fw);
return 0;
}
| 3da2451d6f3489fd63f056f1e38066bb518691d0.cu | #include <iostream>
#include <iomanip>
#include <math.h>
#include <helper_cuda.h>
#include <complex>
#include "../src/cuspreadinterp.h"
#include "../contrib/utils.h"
using namespace std;
int main(int argc, char* argv[])
{
int nf1, nf2, nf3;
FLT sigma = 2.0;
int N1, N2, N3, M;
if (argc<5) {
fprintf(stderr,
"Usage: interp3d method nupts_distr nf1 nf2 nf3 [M [tol [sort]]]\n"
"Arguments:\n"
" method: One of\n"
" 1: nupts driven, or\n"
" 2: sub-problem.\n"
" nupts_distr: The distribution of the points; one of\n"
" 0: uniform, or\n"
" 1: concentrated in a small region.\n"
" nf1, nf2, nf3: The size of the 3D array.\n"
" M: The number of non-uniform points (default nf1 * nf2 * nf3 / 8).\n"
" tol: NUFFT tolerance (default 1e-6).\n"
" sort: One of\n"
" 0: do not sort the points, or\n"
" 1: sort the points (default).\n");
return 1;
}
double w;
int method;
sscanf(argv[1],"%d",&method);
int nupts_distribute;
sscanf(argv[2],"%d",&nupts_distribute);
sscanf(argv[3],"%lf",&w); nf1 = (int)w; // so can read 1e6 right!
sscanf(argv[4],"%lf",&w); nf2 = (int)w; // so can read 1e6 right!
sscanf(argv[5],"%lf",&w); nf3 = (int)w; // so can read 1e6 right!
N1 = (int) nf1/sigma;
N2 = (int) nf2/sigma;
N3 = (int) nf3/sigma;
M = N1*N2*N3;// let density always be 1
if(argc>6){
sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right!
if(M == 0) M=N1*N2*N3;
}
FLT tol=1e-6;
if(argc>7){
sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right!
}
int sort=1;
if(argc>8){
sscanf(argv[8],"%d",&sort);
}
int ier;
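	// Width (number of grid points) of the spreading kernel implied by the
	// requested tolerance (roughly one point per digit of accuracy).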
int ns=std::ceil(-log10(tol/10.0));
int maxsubprobsize;
cout<<scientific<<setprecision(3);
FLT *x, *y, *z;
CPX *c, *fw;
cudaMallocHost(&x, M*sizeof(FLT));
cudaMallocHost(&y, M*sizeof(FLT));
cudaMallocHost(&z, M*sizeof(FLT));
cudaMallocHost(&c, M*sizeof(CPX));
cudaMallocHost(&fw,nf1*nf2*nf3*sizeof(CPX));
switch(nupts_distribute){
// Making data
case 1: //uniform
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*randm11(), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*randm11(), nf2, 1);
z[i] = RESCALE(M_PI*randm11(), nf3, 1);
//cout << x[i] << "," << y[i] << "," << z[i] << endl;
}
maxsubprobsize = 65536;
}
break;
case 2: // concentrate on a small region
{
for (int i = 0; i < M; i++) {
x[i] = RESCALE(M_PI*rand01()/(nf1*2/32), nf1, 1);// x in [-pi,pi)
y[i] = RESCALE(M_PI*rand01()/(nf1*2/32), nf2, 1);
z[i] = RESCALE(M_PI*rand01()/(nf3*2/32), nf3, 1);
}
maxsubprobsize = 1024;
}
break;
default:
cerr<<"error: nupts distr should be 1,2" << endl;
return 1;
}
for(int i=0; i<nf1*nf2*nf3; i++){
fw[i].real(1.0);
fw[i].imag(0.0);
}
int dim=3;
cufinufft_plan dplan;
ier = cufinufft_default_opts(2, dim, &dplan.opts);
if(ier != 0 ){
cout<<"error: cufinufft_default_opts"<<endl;
return 0;
}
ier = setup_spreader_for_nufft(dplan.spopts, tol, dplan.opts);
dplan.opts.upsampfac=sigma;
dplan.opts.gpu_method=method;
dplan.opts.gpu_kerevalmeth=1;
dplan.opts.gpu_sort=sort;
dplan.spopts.pirange=0;
if(dplan.opts.gpu_method == 2)
{
dplan.opts.gpu_binsizex=16;
dplan.opts.gpu_binsizey=16;
dplan.opts.gpu_binsizez=2;
dplan.opts.gpu_maxsubprobsize=maxsubprobsize;
}
if(dplan.opts.gpu_method == 1)
{
dplan.opts.gpu_binsizex=16;
dplan.opts.gpu_binsizey=8;
dplan.opts.gpu_binsizez=4;
}
CNTime timer;
/*warm up gpu*/
char *a;
timer.restart();
checkCudaErrors(cudaMalloc(&a,1));
#ifdef TIME
cout<<"[time ]"<< " (warm up) First cudamalloc call " << timer.elapsedsec() <<" s"<<endl<<endl;
#endif
#ifdef INFO
cout<<"[info ] Interpolating ["<<nf1<<"x"<<nf2<<"x"<<nf3<<
"] uniform points to "<<M<<"nupts"<<endl;
#endif
timer.restart();
ier = cufinufft_interp3d(N1, N2, N3, nf1, nf2, nf3, fw, M, x, y, z, c, tol,
&dplan);
if(ier != 0 ){
cout<<"error: cnufftinterp3d"<<endl;
return 0;
}
FLT t=timer.elapsedsec();
printf("[Method %d] %ld U pts to #%d NU pts in %.3g s (\t%.3g U pts/s)\n",
dplan.opts.gpu_method,nf1*nf2*nf3,M,t,M/t);
#ifdef RESULT
cout<<"[result-input]"<<endl;
for(int j=0; j<10; j++){
printf(" (%2.3g,%2.3g)",c[j].real(),c[j].imag() );
cout<<endl;
}
cout<<endl;
#endif
cudaFreeHost(x);
cudaFreeHost(y);
cudaFreeHost(z);
cudaFreeHost(c);
cudaFreeHost(fw);
return 0;
}
|
d3077b8cb4146441e7032dce66cfbb7fedb0dc64.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
#include<time.h>
#include<format.h>
int main(int argc, char** argv){
if(argc < 2) {
fprintf(stderr, "USAGE: %s filename.jpg ...\n", argv[0]);
return EXIT_FAILURE;
}
JPGReader* reader = newJPGReader();
if (!reader) {
fprintf(stderr, "Unable to create jpgreader, likely malloc failure\n");
return EXIT_FAILURE;
}
int error = openJPG(reader, argv[1]);
if (!error) {
const char* filename = (reader->num_channels == 1) ? "outfile.pgm" : "outfile.ppm";
writeJPG(reader, filename);
} else {
printf("Failed to open jpg %s,\n ", argv[1]);
printError(reader); printf("\n");
}
clock_t cumulative_time = 0;
int i, n = 2;
double total_time = 0;
for (i=0; i<n; i++){
int filename_id = 1 + (i % (argc - 1));
clock_t start = clock();
error = openJPG(reader, argv[filename_id]);
total_time += (clock() - start);
if (error){
printf("Failed to open jpg %s,\n ", argv[filename_id]);
printError(reader); printf("\n");
}
cumulative_time += reader->time;
}
delJPGReader(reader);
double t_pi = 1000.0 * (double) total_time / (n * CLOCKS_PER_SEC);
printf("%0.3lfms per image\n", t_pi);
double t = 1000.0 * (double) cumulative_time / CLOCKS_PER_SEC / n;
printf("DEBUG_TIME %0.4lfms, %0.3lf%%\n", t, 100*t/t_pi);
hipDeviceSynchronize();
hipDeviceReset();
return EXIT_SUCCESS;
}
| d3077b8cb4146441e7032dce66cfbb7fedb0dc64.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
#include<time.h>
#include<format.h>
int main(int argc, char** argv){
if(argc < 2) {
fprintf(stderr, "USAGE: %s filename.jpg ...\n", argv[0]);
return EXIT_FAILURE;
}
JPGReader* reader = newJPGReader();
if (!reader) {
fprintf(stderr, "Unable to create jpgreader, likely malloc failure\n");
return EXIT_FAILURE;
}
int error = openJPG(reader, argv[1]);
if (!error) {
const char* filename = (reader->num_channels == 1) ? "outfile.pgm" : "outfile.ppm";
writeJPG(reader, filename);
} else {
printf("Failed to open jpg %s,\n ", argv[1]);
printError(reader); printf("\n");
}
clock_t cumulative_time = 0;
int i, n = 2;
double total_time = 0;
for (i=0; i<n; i++){
int filename_id = 1 + (i % (argc - 1));
clock_t start = clock();
error = openJPG(reader, argv[filename_id]);
total_time += (clock() - start);
if (error){
printf("Failed to open jpg %s,\n ", argv[filename_id]);
printError(reader); printf("\n");
}
cumulative_time += reader->time;
}
delJPGReader(reader);
double t_pi = 1000.0 * (double) total_time / (n * CLOCKS_PER_SEC);
printf("%0.3lfms per image\n", t_pi);
double t = 1000.0 * (double) cumulative_time / CLOCKS_PER_SEC / n;
printf("DEBUG_TIME %0.4lfms, %0.3lf%%\n", t, 100*t/t_pi);
cudaDeviceSynchronize();
cudaDeviceReset();
return EXIT_SUCCESS;
}
|
7509305acad7175e80d7267e123c7d8adf31562f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* CUDA-implemented utility functions & kernels needed by the neural net
* @author Aadyot Bhatngar
* @date April 22, 2018
*/
#include "utils.cuh"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <algorithm>
#include "helper_cuda.h"
// CUDA block width
#define BW 1024
/**
 * Sets all entries in a device buffer equal to a specified value.
*/
template<typename T> void cudaMemsetType(T *dev_ptr, T val, int n_vals)
{
thrust::device_ptr<T> thrust_dev_ptr(dev_ptr);
thrust::fill(thrust_dev_ptr, thrust_dev_ptr + n_vals, val);
}
/**
* Invokes a CUDA kernel to compute the average cross entropy between softmaxed
* predictions pred_Y and ground truth true_Y.
*
* @param pred_Y predictions made by model (probability vectors)
* @param true_Y true output values (one-hot vectors)
* @param n number of predictions
* @param c number of channels per prediction
* @param h height of each prediction
* @param w width of each prediction
*
* @return cross-entropy loss between pred_Y and true_Y
*/
float CrossEntropyLoss(float* pred_Y, float* true_Y, int n, int c, int h, int w)
{
    // Initialize loss on the device to be zero
float loss, *d_loss;
CUDA_CALL( hipMalloc(&d_loss, sizeof(float)) );
cudaMemsetType<float>(d_loss, 0.0, 1);
// Accumulate the total loss on the device by invoking a kernel
int n_blocks = ::min(65535, (n * c * h * w + BW - 1) / BW);
// TODO (set 5): call CrossEntropyKernel
hipLaunchKernelGGL(( CrossEntropyKernel), dim3(n_blocks), dim3(BW), BW*sizeof(float), 0, pred_Y, true_Y, d_loss, n, c, h, w);
// Copy back the accumulated loss on the device back to the host
CUDA_CALL( hipMemcpy(&loss, d_loss, sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CALL( hipFree(d_loss) );
// Return the average loss
return loss;
}
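/*
 * Example call (illustrative only; d_pred and d_true stand for device buffers
 * of n*c*h*w floats holding softmaxed predictions and one-hot labels):
 *
 *     float avg_loss = CrossEntropyLoss(d_pred, d_true, n, c, h, w);
 *
 * The returned value is already averaged over the n samples.
 */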
/**
* Invokes a CUDA kernel to compute the average accuracy of softmaxed predictions
* pred_Y, given ground truth true_Y.
*
* @param pred_Y predictions made by model (probability vectors)
* @param true_Y true output values (one-hot vectors)
* @param n number of predictions
* @param c number of channels per prediction
* @param h height of each prediction
* @param w width of each prediction
*
* @return proportion of n for which the maximum entry in pred_Y (most probable
* class predicted) is the same as the one entry in true_Y (true class)
*/
float SoftThresholdAccuracy(float* pred_Y, float* true_Y,
int n, int c, int h, int w)
{
// Initialize the accuracy on the device to be zero
float acc, *d_acc;
CUDA_CALL( hipMalloc(&d_acc, sizeof(float)) );
cudaMemsetType<float>(d_acc, 0.0, 1);
    // Accumulate the total accuracy on the device by invoking a kernel
int n_blocks = ::min(65535, (n * c * h * w + BW - 1) / BW);
hipLaunchKernelGGL(( SoftThresholdAccKernel), dim3(n_blocks), dim3(BW), BW * sizeof(float), 0, pred_Y, true_Y,
d_acc, n, c, h, w);
// Copy back the accumulated accuracy on the device back to the host
CUDA_CALL(hipMemcpy(&acc, d_acc, sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipFree(d_acc));
// Return the average accuracy
return acc / static_cast<float>(n);
}
/**
* Kernel to compute cross-entropy between pred_Y and true_Y as described by
* {\link CrossEntropyLoss}.
*/
__global__ void CrossEntropyKernel(float* pred_Y, float* true_Y, float *loss,
int n, int c, int h, int w)
{
extern __shared__ float shmem[];
// TODO (set 5): use a parallel reduction to compute cross-entropy between
// pred_Y and true_Y, i.e. -sum( log(pred_Y[i]) * true_Y[i] ),
// where i ranges from 0 to (n*c*h*w) - 1
int tid = blockDim.x*blockIdx.x + threadIdx.x;
const int local_tid = threadIdx.x;
shmem[local_tid] = 0.0;
while (tid < (n*c*h*w) )
{
shmem[local_tid] -= log(pred_Y[tid]) * true_Y[tid];
tid += gridDim.x*blockDim.x; // Only necessary when the number of blocks > 65535
}
__syncthreads();
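    // Tree reduction over the per-thread partial sums in shared memory;
    // the halving pattern relies on blockDim.x (BW = 1024) being a power of two.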
for (int s = blockDim.x/2; s > 0; s /= 2)
{
if (local_tid < s)
{
shmem[local_tid] += shmem[local_tid + s];
}
__syncthreads();
}
// atomically add the accumulated loss per block into the global accumulator
if (threadIdx.x == 0)
atomicAdd(loss, shmem[0] / static_cast<float>(n));
}
/**
* Kernel to compute accuracy of pred_Y given ground truth true_Y as described
* by {\link SoftThresholdAccuracy}.
*/
__global__ void SoftThresholdAccKernel(float* pred_Y, float* true_Y, float* acc,
int n, int c, int h, int w)
{
extern __shared__ float shmem[];
unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned tid = threadIdx.x;
    // have each thread in each block accumulate part of the total accuracy
    // count in shared memory
shmem[tid] = 0.0;
for (; idx < n; idx += blockDim.x * gridDim.x)
{
unsigned idx_cur = idx * c * h * w;
        // Determine which component/element of the current prediction vector
// and its corresponding ground truth is largest
unsigned argmax_pred = 0, argmax_true = 0;
for (unsigned j = 0; j < c * h * w; ++j)
{
if (pred_Y[idx_cur + argmax_pred] < pred_Y[idx_cur + j])
argmax_pred = j;
if (true_Y[idx_cur + argmax_true] < true_Y[idx_cur + j])
argmax_true = j;
}
// If we were correct, add 1 to the accuracy count
if (argmax_pred == argmax_true)
shmem[tid] += 1.0;
}
__syncthreads();
// do a reduction to sum up all of the accuracy components in this block's
// shared memory
for (unsigned s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
shmem[tid] += shmem[tid + s];
__syncthreads();
}
// atomically add the accumulated accuracy per block into the global accumulator
if (tid == 0) atomicAdd(acc, shmem[tid]);
}
| 7509305acad7175e80d7267e123c7d8adf31562f.cu | /**
* CUDA-implemented utility functions & kernels needed by the neural net
* @author Aadyot Bhatngar
* @date April 22, 2018
*/
#include "utils.cuh"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <algorithm>
#include "helper_cuda.h"
// CUDA block width
#define BW 1024
/**
 * Sets all entries in a device buffer equal to a specified value.
*/
template<typename T> void cudaMemsetType(T *dev_ptr, T val, int n_vals)
{
thrust::device_ptr<T> thrust_dev_ptr(dev_ptr);
thrust::fill(thrust_dev_ptr, thrust_dev_ptr + n_vals, val);
}
/**
* Invokes a CUDA kernel to compute the average cross entropy between softmaxed
* predictions pred_Y and ground truth true_Y.
*
* @param pred_Y predictions made by model (probability vectors)
* @param true_Y true output values (one-hot vectors)
* @param n number of predictions
* @param c number of channels per prediction
* @param h height of each prediction
* @param w width of each prediction
*
* @return cross-entropy loss between pred_Y and true_Y
*/
float CrossEntropyLoss(float* pred_Y, float* true_Y, int n, int c, int h, int w)
{
    // Initialize loss on the device to be zero
float loss, *d_loss;
CUDA_CALL( cudaMalloc(&d_loss, sizeof(float)) );
cudaMemsetType<float>(d_loss, 0.0, 1);
// Accumulate the total loss on the device by invoking a kernel
int n_blocks = std::min(65535, (n * c * h * w + BW - 1) / BW);
// TODO (set 5): call CrossEntropyKernel
CrossEntropyKernel<<<n_blocks, BW, BW*sizeof(float)>>>(pred_Y, true_Y, d_loss, n, c, h, w);
// Copy back the accumulated loss on the device back to the host
CUDA_CALL( cudaMemcpy(&loss, d_loss, sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CALL( cudaFree(d_loss) );
// Return the average loss
return loss;
}
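/*
 * Example call (illustrative only; d_pred and d_true stand for device buffers
 * of n*c*h*w floats holding softmaxed predictions and one-hot labels):
 *
 *     float avg_loss = CrossEntropyLoss(d_pred, d_true, n, c, h, w);
 *
 * The returned value is already averaged over the n samples.
 */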
/**
* Invokes a CUDA kernel to compute the average accuracy of softmaxed predictions
* pred_Y, given ground truth true_Y.
*
* @param pred_Y predictions made by model (probability vectors)
* @param true_Y true output values (one-hot vectors)
* @param n number of predictions
* @param c number of channels per prediction
* @param h height of each prediction
* @param w width of each prediction
*
* @return proportion of n for which the maximum entry in pred_Y (most probable
* class predicted) is the same as the one entry in true_Y (true class)
*/
float SoftThresholdAccuracy(float* pred_Y, float* true_Y,
int n, int c, int h, int w)
{
// Initialize the accuracy on the device to be zero
float acc, *d_acc;
CUDA_CALL( cudaMalloc(&d_acc, sizeof(float)) );
cudaMemsetType<float>(d_acc, 0.0, 1);
    // Accumulate the total accuracy on the device by invoking a kernel
int n_blocks = std::min(65535, (n * c * h * w + BW - 1) / BW);
SoftThresholdAccKernel<<<n_blocks, BW, BW * sizeof(float)>>>(pred_Y, true_Y,
d_acc, n, c, h, w);
// Copy back the accumulated accuracy on the device back to the host
CUDA_CALL(cudaMemcpy(&acc, d_acc, sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaFree(d_acc));
// Return the average accuracy
return acc / static_cast<float>(n);
}
/**
* Kernel to compute cross-entropy between pred_Y and true_Y as described by
* {\link CrossEntropyLoss}.
*/
__global__ void CrossEntropyKernel(float* pred_Y, float* true_Y, float *loss,
int n, int c, int h, int w)
{
extern __shared__ float shmem[];
// TODO (set 5): use a parallel reduction to compute cross-entropy between
// pred_Y and true_Y, i.e. -sum( log(pred_Y[i]) * true_Y[i] ),
// where i ranges from 0 to (n*c*h*w) - 1
int tid = blockDim.x*blockIdx.x + threadIdx.x;
const int local_tid = threadIdx.x;
shmem[local_tid] = 0.0;
while (tid < (n*c*h*w) )
{
shmem[local_tid] -= log(pred_Y[tid]) * true_Y[tid];
tid += gridDim.x*blockDim.x; // Only necessary when the number of blocks > 65535
}
__syncthreads();
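    // Tree reduction over the per-thread partial sums in shared memory;
    // the halving pattern relies on blockDim.x (BW = 1024) being a power of two.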
for (int s = blockDim.x/2; s > 0; s /= 2)
{
if (local_tid < s)
{
shmem[local_tid] += shmem[local_tid + s];
}
__syncthreads();
}
// atomically add the accumulated loss per block into the global accumulator
if (threadIdx.x == 0)
atomicAdd(loss, shmem[0] / static_cast<float>(n));
}
/**
* Kernel to compute accuracy of pred_Y given ground truth true_Y as described
* by {\link SoftThresholdAccuracy}.
*/
__global__ void SoftThresholdAccKernel(float* pred_Y, float* true_Y, float* acc,
int n, int c, int h, int w)
{
extern __shared__ float shmem[];
unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned tid = threadIdx.x;
    // have each thread in each block accumulate part of the total accuracy
    // count in shared memory
shmem[tid] = 0.0;
for (; idx < n; idx += blockDim.x * gridDim.x)
{
unsigned idx_cur = idx * c * h * w;
        // Determine which component/element of the current prediction vector
// and its corresponding ground truth is largest
unsigned argmax_pred = 0, argmax_true = 0;
for (unsigned j = 0; j < c * h * w; ++j)
{
if (pred_Y[idx_cur + argmax_pred] < pred_Y[idx_cur + j])
argmax_pred = j;
if (true_Y[idx_cur + argmax_true] < true_Y[idx_cur + j])
argmax_true = j;
}
// If we were correct, add 1 to the accuracy count
if (argmax_pred == argmax_true)
shmem[tid] += 1.0;
}
__syncthreads();
// do a reduction to sum up all of the accuracy components in this block's
// shared memory
for (unsigned s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
shmem[tid] += shmem[tid + s];
__syncthreads();
}
// atomically add the accumulated accuracy per block into the global accumulator
if (tid == 0) atomicAdd(acc, shmem[tid]);
}
|
8d643ba53922ef6680a4ce0cb9d765027c7817eb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
// #include <hip/hip_runtime.h>
// #include <hip/hip_runtime.h>
#include "matrix_sizes.h"
#include "matrix_mult.h"
#if 0 // version 1
// CUDA Kernel
__global__ void
cudakernel_matrix_mul( real_t* C, real_t* A, real_t* B, int wA, int wB)
{
// 2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
// value stores the element that is
// computed by the thread
real_t value = 0;
for (int i = 0; i < wA; ++i)
{
real_t elementA = A[ty * wA + i];
real_t elementB = B[i * wB + tx];
value += elementA * elementB;
}
// Write the matrix to device memory each
// thread writes one element
    C[ty * wB + tx] = value;
}
#endif
#if 1 // version 2
// CUDA Kernel
// Multiply two matrices A * B = C
__global__ void
cudakernel_matrix_mul( real_t* C, real_t* A, real_t* B, int wA, int wB)
{
// 2D Thread ID
int tx = blockIdx.x * TILE_SIZE + threadIdx.x;
int ty = blockIdx.y * TILE_SIZE + threadIdx.y;
// value stores the element that is
// computed by the thread
real_t value = 0;
for (int i = 0; i < wA; ++i)
{
real_t elementA = A[ty * wA + i];
real_t elementB = B[i * wB + tx];
value += elementA * elementB;
}
// Write the matrix to device memory
// each thread writes one element
    C[ty * wB + tx] = value;
}
#endif
void gpu_mat_mul(real_t* h_A, real_t* h_B, real_t* h_C )
{
// allocate device memory
real_t* d_A;
real_t* d_B;
real_t* d_C;
unsigned int size_A = WA * HA;
unsigned int size_B = WB * HB;
unsigned int size_C = WC * HC;
unsigned int mem_size_A = sizeof(real_t) * size_A;
unsigned int mem_size_B = sizeof(real_t) * size_B;
unsigned int mem_size_C = sizeof(real_t) * size_C;
hipMalloc((void**) &d_A, mem_size_A);
hipMalloc((void**) &d_B, mem_size_B);
hipMalloc((void**) &d_C, mem_size_C);
// copy host memory to device*/
hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
// perform the calculation
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(WC / threads.x, HC / threads.y);
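    // Note: the integer division above does not round up, so WC and HC are
    // assumed to be exact multiples of the block size; otherwise the
    // rightmost/bottom tiles of C would be left uncomputed.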
// execute the kernel
hipLaunchKernelGGL(( cudakernel_matrix_mul), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB);
// copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
| 8d643ba53922ef6680a4ce0cb9d765027c7817eb.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
// #include <cuda.h>
// #include <cuda_runtime.h>
#include "matrix_sizes.h"
#include "matrix_mult.h"
#if 0 // version 1
// CUDA Kernel
__global__ void
cudakernel_matrix_mul( real_t* C, real_t* A, real_t* B, int wA, int wB)
{
// 2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
// value stores the element that is
// computed by the thread
real_t value = 0;
for (int i = 0; i < wA; ++i)
{
real_t elementA = A[ty * wA + i];
real_t elementB = B[i * wB + tx];
value += elementA * elementB;
}
// Write the matrix to device memory each
// thread writes one element
    C[ty * wB + tx] = value;
}
#endif
#if 1 // version 2
// CUDA Kernel
// Multiply two matrices A * B = C
__global__ void
cudakernel_matrix_mul( real_t* C, real_t* A, real_t* B, int wA, int wB)
{
// 2D Thread ID
int tx = blockIdx.x * TILE_SIZE + threadIdx.x;
int ty = blockIdx.y * TILE_SIZE + threadIdx.y;
// value stores the element that is
// computed by the thread
real_t value = 0;
for (int i = 0; i < wA; ++i)
{
real_t elementA = A[ty * wA + i];
real_t elementB = B[i * wB + tx];
value += elementA * elementB;
}
// Write the matrix to device memory
// each thread writes one element
    C[ty * wB + tx] = value;
}
#endif
void gpu_mat_mul(real_t* h_A, real_t* h_B, real_t* h_C )
{
// allocate device memory
real_t* d_A;
real_t* d_B;
real_t* d_C;
unsigned int size_A = WA * HA;
unsigned int size_B = WB * HB;
unsigned int size_C = WC * HC;
unsigned int mem_size_A = sizeof(real_t) * size_A;
unsigned int mem_size_B = sizeof(real_t) * size_B;
unsigned int mem_size_C = sizeof(real_t) * size_C;
cudaMalloc((void**) &d_A, mem_size_A);
cudaMalloc((void**) &d_B, mem_size_B);
cudaMalloc((void**) &d_C, mem_size_C);
// copy host memory to device*/
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
// perform the calculation
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(WC / threads.x, HC / threads.y);
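    // Note: the integer division above does not round up, so WC and HC are
    // assumed to be exact multiples of the block size; otherwise the
    // rightmost/bottom tiles of C would be left uncomputed.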
// execute the kernel
cudakernel_matrix_mul<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
|
ed7ece41f044f928471c2f6760ec84556243caf3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include "kernel_fin_2.cu"
#include "kernel_ecc_2.cu"
#include "kernel_cam_2.cu"
#include "kernel_2.cu"
#include "embedded_fehlberg_7_8_2.cu"
#include "solver_2.cu"
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
int work_2( int xmax,
int workload){
//================================================================================80
// VARIABLES
//================================================================================80
//============================================================60
// TIME
//============================================================60
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
time0 = get_time();
//============================================================60
// COUNTERS, POINTERS
//============================================================60
long memory;
int i;
int pointer;
//============================================================60
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//============================================================60
fp* y;
fp* d_y;
long y_mem;
fp* x;
fp* d_x;
long x_mem;
fp* params;
fp* d_params;
int params_mem;
//============================================================60
// TEMPORARY SOLVER VARIABLES
//============================================================60
fp* d_com;
int com_mem;
fp* d_err;
int err_mem;
fp* d_scale;
int scale_mem;
fp* d_yy;
int yy_mem;
fp* d_initvalu_temp;
int initvalu_temp_mem;
fp* d_finavalu_temp;
int finavalu_temp_mem;
//============================================================60
// CUDA KERNELS EXECUTION PARAMETERS
//============================================================60
dim3 threads;
dim3 blocks;
int blocks_x;
time1 = get_time();
//================================================================================80
// ALLOCATE MEMORY
//================================================================================80
//============================================================60
// MEMORY CHECK
//============================================================60
memory = workload*(xmax+1)*EQUATIONS*4;
if(memory>1000000000){
printf("ERROR: trying to allocate more than 1.0GB of memory, decrease workload and span parameters or change memory parameter\n");
return 0;
}
//============================================================60
// ALLOCATE ARRAYS
//============================================================60
//========================================40
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//========================================40
y_mem = workload * (xmax+1) * EQUATIONS * sizeof(fp);
y= (fp *) malloc(y_mem);
hipMalloc((void **)&d_y, y_mem);
x_mem = workload * (xmax+1) * sizeof(fp);
x= (fp *) malloc(x_mem);
hipMalloc((void **)&d_x, x_mem);
params_mem = workload * PARAMETERS * sizeof(fp);
params= (fp *) malloc(params_mem);
hipMalloc((void **)&d_params, params_mem);
//========================================40
// TEMPORARY SOLVER VARIABLES
//========================================40
com_mem = workload * 3 * sizeof(fp);
hipMalloc((void **)&d_com, com_mem);
err_mem = workload * EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_err, err_mem);
scale_mem = workload * EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_scale, scale_mem);
yy_mem = workload * EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_yy, yy_mem);
initvalu_temp_mem = workload * EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_initvalu_temp, initvalu_temp_mem);
finavalu_temp_mem = workload * 13* EQUATIONS * sizeof(fp);
hipMalloc((void **)&d_finavalu_temp, finavalu_temp_mem);
time2 = get_time();
//================================================================================80
// READ FROM FILES OR SET INITIAL VALUES
//================================================================================80
//========================================40
// X
//========================================40
for(i=0; i<workload; i++){
pointer = i * (xmax+1) + 0;
x[pointer] = 0;
}
hipMemcpy(d_x, x, x_mem, hipMemcpyHostToDevice);
//========================================40
// Y
//========================================40
for(i=0; i<workload; i++){
pointer = i*((xmax+1)*EQUATIONS) + 0*(EQUATIONS);
read("../../data/myocyte/y.txt",
&y[pointer],
91,
1,
0);
}
hipMemcpy(d_y, y, y_mem, hipMemcpyHostToDevice);
//========================================40
// PARAMS
//========================================40
for(i=0; i<workload; i++){
pointer = i*PARAMETERS;
read("../../data/myocyte/params.txt",
¶ms[pointer],
18,
1,
0);
}
hipMemcpy(d_params, params, params_mem, hipMemcpyHostToDevice);
time3 = get_time();
//================================================================================80
// EXECUTION IF THERE ARE MANY WORKLOADS
//================================================================================80
if(workload == 1){
threads.x = 32; // define the number of threads in the block
threads.y = 1;
blocks.x = 4; // define the number of blocks in the grid
blocks.y = 1;
}
else{
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = workload/threads.x;
if (workload % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
}
hipLaunchKernelGGL(( solver_2), dim3(blocks), dim3(threads), 0, 0, workload,
xmax,
d_x,
d_y,
d_params,
d_com,
d_err,
d_scale,
d_yy,
d_initvalu_temp,
d_finavalu_temp);
// hipDeviceSynchronize();
// printf("CUDA error: %s\n", hipGetErrorString(hipGetLastError()));
time4 = get_time();
//================================================================================80
// COPY DATA BACK TO CPU
//================================================================================80
hipMemcpy(x, d_x, x_mem, hipMemcpyDeviceToHost);
hipMemcpy(y, d_y, y_mem, hipMemcpyDeviceToHost);
time5 = get_time();
//================================================================================80
// PRINT RESULTS (ENABLE SELECTIVELY FOR TESTING ONLY)
//================================================================================80
// int j, k;
// for(i=0; i<workload; i++){
// printf("WORKLOAD %d:\n", i);
// for(j=0; j<(xmax+1); j++){
// printf("\tTIME %d:\n", j);
// for(k=0; k<EQUATIONS; k++){
// printf("\t\ty[%d][%d][%d]=%13.10f\n", i, j, k, y[i*((xmax+1)*EQUATIONS) + j*(EQUATIONS)+k]);
// }
// }
// }
// for(i=0; i<workload; i++){
// printf("WORKLOAD %d:\n", i);
// for(j=0; j<(xmax+1); j++){
// printf("\tTIME %d:\n", j);
// printf("\t\tx[%d][%d]=%13.10f\n", i, j, x[i * (xmax+1) + j]);
// }
// }
//================================================================================80
// DEALLOCATION
//================================================================================80
//============================================================60
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//============================================================60
free(y);
hipFree(d_y);
free(x);
hipFree(d_x);
free(params);
hipFree(d_params);
//============================================================60
// TEMPORARY SOLVER VARIABLES
//============================================================60
hipFree(d_com);
hipFree(d_err);
hipFree(d_scale);
hipFree(d_yy);
hipFree(d_initvalu_temp);
hipFree(d_finavalu_temp);
time6= get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%.12f s, %.12f % : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : ALLOCATE CPU MEMORY AND GPU MEMORY\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : READ DATA FROM FILES, COPY TO GPU MEMORY\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : RUN GPU KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : COPY GPU DATA TO CPU MEMORY\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : FREE MEMORY\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time6-time0) / 1000000);
//====================================================================================================100
// END OF FILE
//====================================================================================================100
return 0;
}
| ed7ece41f044f928471c2f6760ec84556243caf3.cu | //====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include "kernel_fin_2.cu"
#include "kernel_ecc_2.cu"
#include "kernel_cam_2.cu"
#include "kernel_2.cu"
#include "embedded_fehlberg_7_8_2.cu"
#include "solver_2.cu"
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
int work_2( int xmax,
int workload){
//================================================================================80
// VARIABLES
//================================================================================80
//============================================================60
// TIME
//============================================================60
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
time0 = get_time();
//============================================================60
// COUNTERS, POINTERS
//============================================================60
long memory;
int i;
int pointer;
//============================================================60
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//============================================================60
fp* y;
fp* d_y;
long y_mem;
fp* x;
fp* d_x;
long x_mem;
fp* params;
fp* d_params;
int params_mem;
//============================================================60
// TEMPORARY SOLVER VARIABLES
//============================================================60
fp* d_com;
int com_mem;
fp* d_err;
int err_mem;
fp* d_scale;
int scale_mem;
fp* d_yy;
int yy_mem;
fp* d_initvalu_temp;
int initvalu_temp_mem;
fp* d_finavalu_temp;
int finavalu_temp_mem;
//============================================================60
// CUDA KERNELS EXECUTION PARAMETERS
//============================================================60
dim3 threads;
dim3 blocks;
int blocks_x;
time1 = get_time();
//================================================================================80
// ALLOCATE MEMORY
//================================================================================80
//============================================================60
// MEMORY CHECK
//============================================================60
memory = workload*(xmax+1)*EQUATIONS*4;
if(memory>1000000000){
printf("ERROR: trying to allocate more than 1.0GB of memory, decrease workload and span parameters or change memory parameter\n");
return 0;
}
//============================================================60
// ALLOCATE ARRAYS
//============================================================60
//========================================40
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//========================================40
y_mem = workload * (xmax+1) * EQUATIONS * sizeof(fp);
y= (fp *) malloc(y_mem);
cudaMalloc((void **)&d_y, y_mem);
x_mem = workload * (xmax+1) * sizeof(fp);
x= (fp *) malloc(x_mem);
cudaMalloc((void **)&d_x, x_mem);
params_mem = workload * PARAMETERS * sizeof(fp);
params= (fp *) malloc(params_mem);
cudaMalloc((void **)&d_params, params_mem);
//========================================40
// TEMPORARY SOLVER VARIABLES
//========================================40
com_mem = workload * 3 * sizeof(fp);
cudaMalloc((void **)&d_com, com_mem);
err_mem = workload * EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_err, err_mem);
scale_mem = workload * EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_scale, scale_mem);
yy_mem = workload * EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_yy, yy_mem);
initvalu_temp_mem = workload * EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_initvalu_temp, initvalu_temp_mem);
finavalu_temp_mem = workload * 13* EQUATIONS * sizeof(fp);
cudaMalloc((void **)&d_finavalu_temp, finavalu_temp_mem);
time2 = get_time();
//================================================================================80
// READ FROM FILES OR SET INITIAL VALUES
//================================================================================80
//========================================40
// X
//========================================40
for(i=0; i<workload; i++){
pointer = i * (xmax+1) + 0;
x[pointer] = 0;
}
cudaMemcpy(d_x, x, x_mem, cudaMemcpyHostToDevice);
//========================================40
// Y
//========================================40
for(i=0; i<workload; i++){
pointer = i*((xmax+1)*EQUATIONS) + 0*(EQUATIONS);
read("../../data/myocyte/y.txt",
&y[pointer],
91,
1,
0);
}
cudaMemcpy(d_y, y, y_mem, cudaMemcpyHostToDevice);
//========================================40
// PARAMS
//========================================40
for(i=0; i<workload; i++){
pointer = i*PARAMETERS;
read("../../data/myocyte/params.txt",
¶ms[pointer],
18,
1,
0);
}
cudaMemcpy(d_params, params, params_mem, cudaMemcpyHostToDevice);
time3 = get_time();
//================================================================================80
// EXECUTION IF THERE ARE MANY WORKLOADS
//================================================================================80
if(workload == 1){
threads.x = 32; // define the number of threads in the block
threads.y = 1;
blocks.x = 4; // define the number of blocks in the grid
blocks.y = 1;
}
else{
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = workload/threads.x;
if (workload % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
}
solver_2<<<blocks, threads>>>( workload,
xmax,
d_x,
d_y,
d_params,
d_com,
d_err,
d_scale,
d_yy,
d_initvalu_temp,
d_finavalu_temp);
// cudaThreadSynchronize();
// printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
time4 = get_time();
//================================================================================80
// COPY DATA BACK TO CPU
//================================================================================80
cudaMemcpy(x, d_x, x_mem, cudaMemcpyDeviceToHost);
cudaMemcpy(y, d_y, y_mem, cudaMemcpyDeviceToHost);
time5 = get_time();
//================================================================================80
// PRINT RESULTS (ENABLE SELECTIVELY FOR TESTING ONLY)
//================================================================================80
// int j, k;
// for(i=0; i<workload; i++){
// printf("WORKLOAD %d:\n", i);
// for(j=0; j<(xmax+1); j++){
// printf("\tTIME %d:\n", j);
// for(k=0; k<EQUATIONS; k++){
// printf("\t\ty[%d][%d][%d]=%13.10f\n", i, j, k, y[i*((xmax+1)*EQUATIONS) + j*(EQUATIONS)+k]);
// }
// }
// }
// for(i=0; i<workload; i++){
// printf("WORKLOAD %d:\n", i);
// for(j=0; j<(xmax+1); j++){
// printf("\tTIME %d:\n", j);
// printf("\t\tx[%d][%d]=%13.10f\n", i, j, x[i * (xmax+1) + j]);
// }
// }
//================================================================================80
// DEALLOCATION
//================================================================================80
//============================================================60
// X/Y INPUTS/OUTPUTS, PARAMS INPUTS
//============================================================60
free(y);
cudaFree(d_y);
free(x);
cudaFree(d_x);
free(params);
cudaFree(d_params);
//============================================================60
// TEMPORARY SOLVER VARIABLES
//============================================================60
cudaFree(d_com);
cudaFree(d_err);
cudaFree(d_scale);
cudaFree(d_yy);
cudaFree(d_initvalu_temp);
cudaFree(d_finavalu_temp);
time6= get_time();
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%.12f s, %.12f % : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : ALLOCATE CPU MEMORY AND GPU MEMORY\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : READ DATA FROM FILES, COPY TO GPU MEMORY\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : RUN GPU KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : COPY GPU DATA TO CPU MEMORY\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100);
printf("%.12f s, %.12f % : FREE MEMORY\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time6-time0) / 1000000);
//====================================================================================================100
// END OF FILE
//====================================================================================================100
return 0;
}
|
c46e5e555e69eea31d747fb1838c1e1c1a66a4ca.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file : main_smartCUB.cu
* @brief : Main file for Smart pointers for CUB (shared and unique ptrs) classes, in C++14,
* @details : A playground to try out things in smart pointers;
* especially abstracting our use of smart pointers with CUDA.
* Notice that std::make_unique DOES NOT have a custom deleter! (!!!)
* Same with std::make_shared!
* cf. https://stackoverflow.com/questions/34243367/how-to-pass-deleter-to-make-shared
* @author : Ernest Yeung <[email protected]>
* @date : 20170904
* @ref : cf. Scott Meyers Effective Modern C++
* http://shaharmike.com/cpp/unique-ptr/
* https://katyscode.wordpress.com/2012/10/04/c11-using-stdunique_ptr-as-a-class-member-initialization-move-semantics-and-custom-deleters/
* If you find this code useful, feel free to donate directly and easily at this direct PayPal link:
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc -std=c++14 -I ~/cub-1.7.4 ./smartptr/smartCUB.cu ./smartptr/smartptr.cu main_smartCUB.cu -o main_smartCUB.exe
*
* */
#include <iostream> // std::cout
#include "smartptr/smartptr.h"
#include "smartptr/smartCUB.h"
int main(int argc, char* argv[]) {
constexpr const int Lx = (1 << 8);
std::cout << " Lx : " << Lx << std::endl;
// Allocate host arrays
std::vector<float> f_vec(Lx,1.f);
// Allocate problem device arrays
RRModule_sh X_sh(Lx);
// Initialize device input
X_sh.load_from_hvec(f_vec);
auto d_in = std::move( X_sh.get() );
auto d_out = reduce_Sum(Lx, d_in);
// Allocate output host array
std::vector<float> g_vec(1,0.f);
// Copy results from Device to Host
hipMemcpy(g_vec.data(), d_out.get(), 1*sizeof(float),hipMemcpyDeviceToHost);
// print out result:
std::cout << " g_vec[0] : " << g_vec[0] << std::endl;
// Allocate host arrays
std::vector<float> f1_vec(Lx,2.f);
// Allocate problem device arrays
auto sh_instance = make_sh_u(Lx);
// Initialize device input
hipMemcpy(sh_instance.get(), f1_vec.data(), Lx*sizeof(float), hipMemcpyHostToDevice);
auto d_out1 = reduce_Sum(Lx, sh_instance);
// Allocate output host array
std::vector<float> g1_vec(1,0.f);
// Copy results from Device to Host
hipMemcpy(g1_vec.data(), d_out1.get(), 1*sizeof(float),hipMemcpyDeviceToHost);
// print out result:
std::cout << " g1_vec[0] : " << g1_vec[0] << std::endl;
// Clean up
hipDeviceReset();
return 0;
}
| c46e5e555e69eea31d747fb1838c1e1c1a66a4ca.cu | /**
* @file : main_smartCUB.cu
* @brief : Main file for Smart pointers for CUB (shared and unique ptrs) classes, in C++14,
* @details : A playground to try out things in smart pointers;
* especially abstracting our use of smart pointers with CUDA.
* Notice that std::make_unique DOES NOT have a custom deleter! (!!!)
* Same with std::make_shared!
* cf. https://stackoverflow.com/questions/34243367/how-to-pass-deleter-to-make-shared
* @author : Ernest Yeung <[email protected]>
* @date : 20170904
* @ref : cf. Scott Meyers Effective Modern C++
* http://shaharmike.com/cpp/unique-ptr/
* https://katyscode.wordpress.com/2012/10/04/c11-using-stdunique_ptr-as-a-class-member-initialization-move-semantics-and-custom-deleters/
* If you find this code useful, feel free to donate directly and easily at this direct PayPal link:
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc -std=c++14 -I ~/cub-1.7.4 ./smartptr/smartCUB.cu ./smartptr/smartptr.cu main_smartCUB.cu -o main_smartCUB.exe
*
* */
#include <iostream> // std::cout
#include "smartptr/smartptr.h"
#include "smartptr/smartCUB.h"
int main(int argc, char* argv[]) {
constexpr const int Lx = (1 << 8);
std::cout << " Lx : " << Lx << std::endl;
// Allocate host arrays
std::vector<float> f_vec(Lx,1.f);
// Allocate problem device arrays
RRModule_sh X_sh(Lx);
// Initialize device input
X_sh.load_from_hvec(f_vec);
auto d_in = std::move( X_sh.get() );
auto d_out = reduce_Sum(Lx, d_in);
// Allocate output host array
std::vector<float> g_vec(1,0.f);
// Copy results from Device to Host
cudaMemcpy(g_vec.data(), d_out.get(), 1*sizeof(float),cudaMemcpyDeviceToHost);
// print out result:
std::cout << " g_vec[0] : " << g_vec[0] << std::endl;
// Allocate host arrays
std::vector<float> f1_vec(Lx,2.f);
// Allocate problem device arrays
auto sh_instance = make_sh_u(Lx);
// Initialize device input
cudaMemcpy(sh_instance.get(), f1_vec.data(), Lx*sizeof(float), cudaMemcpyHostToDevice);
auto d_out1 = reduce_Sum(Lx, sh_instance);
// Allocate output host array
std::vector<float> g1_vec(1,0.f);
// Copy results from Device to Host
cudaMemcpy(g1_vec.data(), d_out1.get(), 1*sizeof(float),cudaMemcpyDeviceToHost);
// print out result:
std::cout << " g1_vec[0] : " << g1_vec[0] << std::endl;
// Clean up
cudaDeviceReset();
return 0;
}
|
86485401c05044174f9774c9d0c30dd8845c6802.hip | // !!! This is a file automatically generated by hipify!!!
// http://www.cl.cam.ac.uk/research/rainbow/projects/dcbgrid/DCBGrid-preprint.pdf
#include "GpuDepthMap.hpp"
#include "AmCudaHelper.cuh"
#include "helper_cuda.h"
//#include "CudaHelperCommon.cuh"
#if 0
template<typename T>
void GpuDepthMap<T>::Create( GpuDepthMapType type, int width, int height )
{
if ( type != mapType || width != w || height != h )
{
Destroy();
//LOG_EVENT("Creating depth map");
CUDA_CALL( hipMallocPitch((void**)&gpuImage, &gpuImagePitch, width * sizeof(T), height) );
w = width;
h = height;
mapType = type;
}
}
template<typename T>
void GpuDepthMap<T>::Destroy()
{
//LOG_EVENT("Destroying depth map");
CUDA_FREE(gpuImage);
w = h = 0;
}
template<typename T>
void GpuDepthMap<T>::CopyDataOut( T* hostData )
{
CUDA_CALL(hipMemcpy2D( hostData, w * sizeof(T),
gpuImage, gpuImagePitch,
w * sizeof(T), h,
hipMemcpyDeviceToHost ));
}
template<typename T>
void GpuDepthMap<T>::CopyDataIn( T* const hostData )
{
checkCudaErrors(hipMemcpy2D( gpuImage, gpuImagePitch, hostData,
w * sizeof(T), w * sizeof(T), h,
hipMemcpyHostToDevice ));
}
template<typename T>
void GpuDepthMap<T>::SwapData(GpuDepthMap & other)
{
std::swap<T*>(gpuImage, other.gpuImage);
std::swap<size_t>(gpuImagePitch, other.gpuImagePitch);
std::swap<GpuDepthMapType>(mapType, other.mapType);
std::swap<int>(w, other.w);
std::swap<int>(h, other.h);
}
#endif
| 86485401c05044174f9774c9d0c30dd8845c6802.cu | // http://www.cl.cam.ac.uk/research/rainbow/projects/dcbgrid/DCBGrid-preprint.pdf
#include "GpuDepthMap.hpp"
#include "AmCudaHelper.cuh"
#include "helper_cuda.h"
//#include "CudaHelperCommon.cuh"
#if 0
template<typename T>
void GpuDepthMap<T>::Create( GpuDepthMapType type, int width, int height )
{
if ( type != mapType || width != w || height != h )
{
Destroy();
//LOG_EVENT("Creating depth map");
CUDA_CALL( cudaMallocPitch((void**)&gpuImage, &gpuImagePitch, width * sizeof(T), height) );
w = width;
h = height;
mapType = type;
}
}
template<typename T>
void GpuDepthMap<T>::Destroy()
{
//LOG_EVENT("Destroying depth map");
CUDA_FREE(gpuImage);
w = h = 0;
}
template<typename T>
void GpuDepthMap<T>::CopyDataOut( T* hostData )
{
CUDA_CALL(cudaMemcpy2D( hostData, w * sizeof(T),
gpuImage, gpuImagePitch,
w * sizeof(T), h,
cudaMemcpyDeviceToHost ));
}
template<typename T>
void GpuDepthMap<T>::CopyDataIn( T* const hostData )
{
checkCudaErrors(cudaMemcpy2D( gpuImage, gpuImagePitch, hostData,
w * sizeof(T), w * sizeof(T), h,
cudaMemcpyHostToDevice ));
}
template<typename T>
void GpuDepthMap<T>::SwapData(GpuDepthMap & other)
{
std::swap<T*>(gpuImage, other.gpuImage);
std::swap<size_t>(gpuImagePitch, other.gpuImagePitch);
std::swap<GpuDepthMapType>(mapType, other.mapType);
std::swap<int>(w, other.w);
std::swap<int>(h, other.h);
}
#endif
|
680baf4972c738c5ce5ab86e6ef5d590e0f42ba2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "PolygonSideMapConstructing.hpp"
__device__ bool PolygonSideMapConstructing::isThereAnEdge(const Graph::byte& nodeEdges, GraphEdge direction)
{
return nodeEdges & static_cast<Graph::byte>(direction);
}
__device__ bool
PolygonSideMapConstructing::isThisQuarterForwardSlash(int row, int col, int width, const Graph::byte* graphData)
{
int idxOfGraphEntry = col + row * width;
Graph::byte checkedGraphData = graphData[idxOfGraphEntry];
return isThereAnEdge(checkedGraphData, GraphEdge::UPPER_LEFT);
}
__device__ bool
PolygonSideMapConstructing::isThisQuarterBackslash(int row, int col, int width, const Graph::byte* graphData)
{
int idxOfGraphEntry = col - 1 + row * width;
Graph::byte checkedGraphData = graphData[idxOfGraphEntry];
return isThereAnEdge(checkedGraphData, GraphEdge::UPPER_RIGHT);
}
__global__ void
PolygonSideMapConstructing::createPolygonSide(PolygonSide* sideData, const Graph::byte* graphData, int width, int height)
{
int row = threadIdx.x + (blockIdx.x * blockDim.x);
int col = threadIdx.y + (blockIdx.y * blockDim.y);
if(row < height && col < width)
{
int idxOfQuarter = col + row * width;
PolygonSide::Type currentQuarterSideType = PolygonSide::Type::Point;
if(row != 0 && col != 0)
{
if(isThisQuarterBackslash(row, col, width, graphData))
currentQuarterSideType = PolygonSide::Type::Backslash;
else if(isThisQuarterForwardSlash(row, col, width, graphData))
currentQuarterSideType = PolygonSide::Type::ForwardSlash;
}
PolygonSide::info_type numberOfRegionsUsingPointA = 0;
if(row > 0 && col > 0 && currentQuarterSideType == PolygonSide::Type::Point)
{
Graph::byte currentNodeGraphData = graphData[idxOfQuarter];
Graph::byte upperLeftNodeGraphData = graphData[idxOfQuarter - width - 1];
bool isCurrentNodeConnectedUp = isThereAnEdge(currentNodeGraphData, GraphEdge::UP);
bool isCurrentNodeConnectedLeft = isThereAnEdge(currentNodeGraphData, GraphEdge::LEFT);
bool isUpperLeftNodeConnectedDown = isThereAnEdge(upperLeftNodeGraphData, GraphEdge::DOWN);
bool isUpperLeftNodeConnectedRight = isThereAnEdge(upperLeftNodeGraphData, GraphEdge::RIGHT);
numberOfRegionsUsingPointA =
(4 - isCurrentNodeConnectedUp - isCurrentNodeConnectedLeft - isUpperLeftNodeConnectedDown -
isUpperLeftNodeConnectedRight) << 5;
}
PolygonSide::point_type rowA = static_cast<PolygonSide::point_type>(row * 100);
PolygonSide::point_type colA = static_cast<PolygonSide::point_type>(col * 100);
PolygonSide::point_type rowB = static_cast<PolygonSide::point_type>(row * 100);
PolygonSide::point_type colB = static_cast<PolygonSide::point_type>(col * 100);
if(currentQuarterSideType == PolygonSide::Type::ForwardSlash)
{
rowA -= 25.0f;
colA += 25.0f;
rowB += 25.0f;
colB -= 25.0f;
}
else if(currentQuarterSideType == PolygonSide::Type::Backslash)
{
rowA -= 25.0f;
colA -= 25.0f;
rowB += 25.0f;
colB += 25.0f;
}
sideData[idxOfQuarter].info = numberOfRegionsUsingPointA | static_cast<PolygonSide::info_type>(currentQuarterSideType);
sideData[idxOfQuarter].pointA[0] = rowA;
sideData[idxOfQuarter].pointA[1] = colA;
sideData[idxOfQuarter].pointB[0] = rowB;
sideData[idxOfQuarter].pointB[1] = colB;
}
}
void PolygonSideMapConstructing::createMap(PolygonSide* sideData, const Graph::byte* graphData, int width, int height)
{
dim3 dimBlock(16, 16);
dim3 dimGrid((height + dimBlock.x -1)/dimBlock.x, (width + dimBlock.y -1)/dimBlock.y);
hipLaunchKernelGGL(( PolygonSideMapConstructing::createPolygonSide), dim3(dimGrid), dim3(dimBlock), 0, 0, sideData, graphData, width, height);
hipDeviceSynchronize();
}
void PolygonSideMapConstructing::getCreatedMapData(std::vector<PolygonSide>& output, PolygonSide* d_sideData, int width,
int height)
{
PolygonSide* cellSideValues = new PolygonSide[width * height];
hipMemcpy(cellSideValues, d_sideData, width * height * sizeof(PolygonSide), hipMemcpyDeviceToHost);
output = std::vector<PolygonSide>{cellSideValues, cellSideValues + width * height};
delete[] cellSideValues;
} | 680baf4972c738c5ce5ab86e6ef5d590e0f42ba2.cu | #include "PolygonSideMapConstructing.hpp"
__device__ bool PolygonSideMapConstructing::isThereAnEdge(const Graph::byte& nodeEdges, GraphEdge direction)
{
return nodeEdges & static_cast<Graph::byte>(direction);
}
__device__ bool
PolygonSideMapConstructing::isThisQuarterForwardSlash(int row, int col, int width, const Graph::byte* graphData)
{
int idxOfGraphEntry = col + row * width;
Graph::byte checkedGraphData = graphData[idxOfGraphEntry];
return isThereAnEdge(checkedGraphData, GraphEdge::UPPER_LEFT);
}
__device__ bool
PolygonSideMapConstructing::isThisQuarterBackslash(int row, int col, int width, const Graph::byte* graphData)
{
int idxOfGraphEntry = col - 1 + row * width;
Graph::byte checkedGraphData = graphData[idxOfGraphEntry];
return isThereAnEdge(checkedGraphData, GraphEdge::UPPER_RIGHT);
}
__global__ void
PolygonSideMapConstructing::createPolygonSide(PolygonSide* sideData, const Graph::byte* graphData, int width, int height)
{
int row = threadIdx.x + (blockIdx.x * blockDim.x);
int col = threadIdx.y + (blockIdx.y * blockDim.y);
if(row < height && col < width)
{
int idxOfQuarter = col + row * width;
PolygonSide::Type currentQuarterSideType = PolygonSide::Type::Point;
if(row != 0 && col != 0)
{
if(isThisQuarterBackslash(row, col, width, graphData))
currentQuarterSideType = PolygonSide::Type::Backslash;
else if(isThisQuarterForwardSlash(row, col, width, graphData))
currentQuarterSideType = PolygonSide::Type::ForwardSlash;
}
PolygonSide::info_type numberOfRegionsUsingPointA = 0;
if(row > 0 && col > 0 && currentQuarterSideType == PolygonSide::Type::Point)
{
Graph::byte currentNodeGraphData = graphData[idxOfQuarter];
Graph::byte upperLeftNodeGraphData = graphData[idxOfQuarter - width - 1];
bool isCurrentNodeConnectedUp = isThereAnEdge(currentNodeGraphData, GraphEdge::UP);
bool isCurrentNodeConnectedLeft = isThereAnEdge(currentNodeGraphData, GraphEdge::LEFT);
bool isUpperLeftNodeConnectedDown = isThereAnEdge(upperLeftNodeGraphData, GraphEdge::DOWN);
bool isUpperLeftNodeConnectedRight = isThereAnEdge(upperLeftNodeGraphData, GraphEdge::RIGHT);
numberOfRegionsUsingPointA =
(4 - isCurrentNodeConnectedUp - isCurrentNodeConnectedLeft - isUpperLeftNodeConnectedDown -
isUpperLeftNodeConnectedRight) << 5;
}
PolygonSide::point_type rowA = static_cast<PolygonSide::point_type>(row * 100);
PolygonSide::point_type colA = static_cast<PolygonSide::point_type>(col * 100);
PolygonSide::point_type rowB = static_cast<PolygonSide::point_type>(row * 100);
PolygonSide::point_type colB = static_cast<PolygonSide::point_type>(col * 100);
if(currentQuarterSideType == PolygonSide::Type::ForwardSlash)
{
rowA -= 25.0f;
colA += 25.0f;
rowB += 25.0f;
colB -= 25.0f;
}
else if(currentQuarterSideType == PolygonSide::Type::Backslash)
{
rowA -= 25.0f;
colA -= 25.0f;
rowB += 25.0f;
colB += 25.0f;
}
sideData[idxOfQuarter].info = numberOfRegionsUsingPointA | static_cast<PolygonSide::info_type>(currentQuarterSideType);
sideData[idxOfQuarter].pointA[0] = rowA;
sideData[idxOfQuarter].pointA[1] = colA;
sideData[idxOfQuarter].pointB[0] = rowB;
sideData[idxOfQuarter].pointB[1] = colB;
}
}
void PolygonSideMapConstructing::createMap(PolygonSide* sideData, const Graph::byte* graphData, int width, int height)
{
dim3 dimBlock(16, 16);
dim3 dimGrid((height + dimBlock.x -1)/dimBlock.x, (width + dimBlock.y -1)/dimBlock.y);
PolygonSideMapConstructing::createPolygonSide<<<dimGrid, dimBlock>>>(sideData, graphData, width, height);
cudaDeviceSynchronize();
}
void PolygonSideMapConstructing::getCreatedMapData(std::vector<PolygonSide>& output, PolygonSide* d_sideData, int width,
int height)
{
PolygonSide* cellSideValues = new PolygonSide[width * height];
cudaMemcpy(cellSideValues, d_sideData, width * height * sizeof(PolygonSide), cudaMemcpyDeviceToHost);
output = std::vector<PolygonSide>{cellSideValues, cellSideValues + width * height};
delete[] cellSideValues;
} |
dec10c41b2cf84889cbbd2770ac967a5689dd20b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CUDAKernels.cu
* Copyright (C) 2009-2017 by MegaMol Team
* Alle Rechte vorbehalten.
*/
#include "SombreroKernels.cuh"
using namespace megamol;
using namespace megamol::protein_cuda;
__constant__ float C_PI = 3.141592653589f;
/**
* CUDA symbol for the face grouping will be false if no
* further changes where made to the face IDs.
*/
__device__ bool changes_d;
/**
* Smaller operator for edges.
*/
__host__ __device__ bool operator<(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.vertex_id_1 < rhs.vertex_id_1;
}
/**
* Sort edges ascending to the face_id_0.
*/
struct FaceID0Cmp {
__host__ __device__ bool operator()(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.face_id_0 < rhs.face_id_0;
}
};
/**
* Sort edges ascending to the face_id_1.
*/
struct FaceID1Cmp {
__host__ __device__ bool operator()(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.face_id_1 < rhs.face_id_1;
}
};
/**
* Sort edges ascending to the vertex_id_0.
*/
struct VertexID0Cmp {
__host__ __device__ bool operator()(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.vertex_id_0 < rhs.vertex_id_0;
}
};
/**
* Sort edges ascending to the vertex_id_1.
*/
struct VertexID1Cmp {
__host__ __device__ bool operator()(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.vertex_id_1 < rhs.vertex_id_1;
}
};
/**
 * Get the thread index based on the current CUDA grid dimensions.
*
* @return Returns the thread index based on the current CUDA grid
* dimensions.
*/
__device__ uint GetCurThreadIndex() {
return __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x;
}
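/*
 * Illustrative host-side counterpart (an addition, not original code): a minimal
 * sketch of how a 2D launch grid matching GetCurThreadIndex could be built. The
 * project uses its own Grid() helper; the name and the 65535-block clamp below are
 * assumptions for illustration only.
 */
static inline dim3 ExampleGridFor(uint threadCnt, uint blockSize) {
    uint blocks = (threadCnt + blockSize - 1) / blockSize; // ceiling division
    if (blocks == 0) blocks = 1;                           // always launch at least one block
    uint gridX = blocks < 65535u ? blocks : 65535u;        // keep gridDim.x in range
    uint gridY = (blocks + gridX - 1) / gridX;             // spill the remainder into y
    return dim3(gridX, gridY, 1);
}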
/**
 * Get the IDs of each neighbour for every vertex.
*
* @param p_neighbour_ids Will contain the IDs of neighbouring vertices
 * @param p_valid_z_values Remembers if a vertex is valid.
* @param p_vertex_edge_offset The edges that contain the vertex.
* @param p_vertex_edge_offset_depth The number of edges per vertex.
* @param p_vertex_cnt The number of vertices in the mesh.
*/
__global__ void GetNeighbourIds(uint* p_neighbour_ids, bool* p_valid_z_values,
const SombreroKernels::Edge* p_vertex_edge_offset, const uint* p_vertex_edge_offset_depth, uint p_vertex_cnt,
uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
if (!p_valid_z_values[idx]) return;
uint begin = p_vertex_edge_offset_depth[idx];
uint end = p_vertex_edge_offset_depth[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
uint neighbour_id;
// Find the IDs of neighbouring vertices.
for (uint i = begin; i < end; i++) {
SombreroKernels::Edge cur = p_vertex_edge_offset[i];
if (cur.vertex_id_0 == idx)
neighbour_id = cur.vertex_id_1;
else
neighbour_id = cur.vertex_id_0;
p_neighbour_ids[i] = neighbour_id;
}
}
/**
* Create the edges of the mesh based on the faces. For each
* face three edges are created. Each edge will exist twice
 * facing opposite directions.
*
* @param p_faces The list of faces in the mesh. Each face
* consists of three vertex IDs.
* @param p_edge Will contain the edges of the mesh.
* @param p_face_cnt The number of faces in the mesh.
*/
__global__ void InitEdges(const uint3* p_faces, SombreroKernels::Edge* p_edges, uint p_face_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_face_cnt) return;
// First edge.
{
SombreroKernels::Edge cur = p_edges[idx * 3];
cur.face_id_0 = idx;
cur.face_id_1 = -1;
cur.vertex_id_0 = p_faces[idx].x;
cur.vertex_id_1 = p_faces[idx].y;
cur.edge_id = idx * 3;
p_edges[idx * 3] = cur;
}
// Second edge.
{
SombreroKernels::Edge cur = p_edges[idx * 3 + 1];
cur.face_id_0 = idx;
cur.face_id_1 = -1;
cur.vertex_id_0 = p_faces[idx].y;
cur.vertex_id_1 = p_faces[idx].z;
cur.edge_id = idx * 3 + 1;
p_edges[idx * 3 + 1] = cur;
}
// Third edge.
{
SombreroKernels::Edge cur = p_edges[idx * 3 + 2];
cur.face_id_0 = idx;
cur.face_id_1 = -1;
cur.vertex_id_0 = p_faces[idx].z;
cur.vertex_id_1 = p_faces[idx].x;
cur.edge_id = idx * 3 + 2;
p_edges[idx * 3 + 2] = cur;
}
}
/**
* Match the edges so that each edge has a full set of faces
* to which it belongs. Therefore search the sorted edges
 * for the same edge that faces the opposite direction. The
* sorted edges are sorted ascending to the vertex_id_1.
*
* @param p_edges The list of edges of the mesh.
* @param p_sorted_edges The sorted edges of the mesh.
* @param p_edge_offset The offset that points to the index
* in the p_sorted_edges for which vertex_id_1 == vertex_id_0
* of the current edge.
* @param p_edge_cnt The number of edges in the mesh (x2).
*/
__global__ void MatchEdges(
SombreroKernels::Edge* p_edges, SombreroKernels::Edge* p_sorted_edges, uint* p_edge_offset, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_edge_cnt) return;
// Get current edge and check if it is already matched.
SombreroKernels::Edge cur = p_edges[idx];
if (cur.face_id_1 != -1) return;
    // Find the same edge facing in the opposite direction.
uint begin = p_edge_offset[cur.vertex_id_0];
uint end = p_edge_offset[cur.vertex_id_0 + 1];
for (uint i = begin; i < end; i++) {
uint id = p_sorted_edges[i].edge_id;
if (i == idx) continue;
if (p_edges[id].face_id_1 != -1) continue;
if (cur.vertex_id_0 == p_edges[id].vertex_id_1 && cur.vertex_id_1 == p_edges[id].vertex_id_0) {
// Found the edge.
cur.face_id_1 = p_edges[id].face_id_0;
cur.opposite_edge_id = id;
p_edges[id].face_id_1 = cur.face_id_0;
p_edges[id].opposite_edge_id = cur.edge_id;
p_edges[idx] = cur;
break;
}
}
}
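/*
 * CPU reference added for illustration (not part of the original code): it pairs
 * every directed edge with its reverse twin exactly like MatchEdges, but with a
 * simple quadratic scan instead of the sorted-offset lookup, which makes the
 * matching rule easy to verify on small meshes.
 */
static void MatchEdgesReferenceCPU(SombreroKernels::Edge* p_edges, uint p_edge_cnt) {
    for (uint i = 0; i < p_edge_cnt; i++) {
        if (p_edges[i].face_id_1 != -1) continue; // already paired
        for (uint j = i + 1; j < p_edge_cnt; j++) {
            if (p_edges[j].face_id_1 != -1) continue;
            if (p_edges[i].vertex_id_0 == p_edges[j].vertex_id_1 &&
                p_edges[i].vertex_id_1 == p_edges[j].vertex_id_0) {
                // Exchange face IDs and remember the opposite edge, as in the kernel.
                p_edges[i].face_id_1 = p_edges[j].face_id_0;
                p_edges[j].face_id_1 = p_edges[i].face_id_0;
                p_edges[i].opposite_edge_id = p_edges[j].edge_id;
                p_edges[j].opposite_edge_id = p_edges[i].edge_id;
                break;
            }
        }
    }
}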
/**
* Find all edges that belong to a certain face by looping over the edges
* of the mesh.
*
* @param p_face_edge_offset Will contain the edges that belong to a face.
* @param p_face_id_0_offset Contains the edges sorted ascending for face_id_0
* @param p_face_id_1_offset Contains the edges sorted ascending for face_id_1
* @param depth The maximum number of edges per face.
* @param p_face_cnt The number of faces in the mesh.
*/
__global__ void SetFaceEdgeOffset(SombreroKernels::Edge* p_face_edge_offset, SombreroKernels::Edge* p_face_id_0_offset,
SombreroKernels::Edge* p_face_id_1_offset, uint depth, uint p_face_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_face_cnt) return;
// Find edges that belong to the face.
uint cur_depth = idx * depth;
uint begin = idx * 3;
uint end = (idx + 1) * 3;
for (uint i = begin; i < end; i++) {
p_face_edge_offset[cur_depth++] = p_face_id_0_offset[i];
p_face_edge_offset[cur_depth++] = p_face_id_1_offset[i];
}
}
/**
* Find all edges that belong to a certain vertex by looping over the edges
* of the mesh.
*
* @param p_vertex_edge_offset Will contain the edges that belong to a vertex.
* @param p_vertex_id_0_sorted Contains the edges sorted ascending for vertex_id_0
* @param p_vertex_id_1_sorted Contains the edges sorted ascending for vertex_id_1
* @param p_vertex_id_0_offset Points to the first edge in p_vertex_id_0_sorted
* with the vertex_id_0 == idx
* @param p_vertex_id_1_offset Points to the first edge in p_vertex_id_1_sorted
* with the vertex_id_1 == idx
* @param depth The maximum number of edges per vertex.
* @param p_vertex_cnt The number of vertices in the mesh.
*/
__global__ void SetVertexEdgeOffset(SombreroKernels::Edge* p_vertex_edge_offset,
SombreroKernels::Edge* p_vertex_id_0_sorted, SombreroKernels::Edge* p_vertex_id_1_sorted,
uint* p_vertex_id_0_offset, uint* p_vertex_id_1_offset, uint* depth, uint p_vertex_cnt, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
// Find edges that belong to the vertex.
uint cur_depth = depth[idx];
// Edges with vertex_id_0 == idx
uint begin = p_vertex_id_0_offset[idx];
uint end = p_vertex_id_0_offset[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
for (uint i = begin; i < end; i++) {
p_vertex_edge_offset[cur_depth++] = p_vertex_id_0_sorted[i];
}
// Edges with vertex_id_1 == idx
begin = p_vertex_id_1_offset[idx];
end = p_vertex_id_1_offset[idx + 1];
for (uint i = begin; i < end; i++) {
p_vertex_edge_offset[cur_depth++] = p_vertex_id_1_sorted[i];
}
}
/**
* Compute the average phi value in the neighbourhood and assign it to
 * the current vertex. Take care of special cases around the boundary
 * meridian by assigning the vertices on the meridian the phi value of
* 0 if they are approached from the "right" and a value of 2*pi if they
* are approached from the "left".
*
* @param p_phivalues_in The phi values of the last iteration (input)
* @param p_phivalues_out The phi value of the current iteration (output)
 * @param p_valid_phi_values Remembers if a vertex is valid.
* @param p_vertex_neighbours The IDs of the neighbouring vertices.
* @param p_vertex_edge_offset_depth The number of edges per vertex.
* @param p_vertex_type The type of the vertex: -1: Pole, 0: vertex
* is not on the meridian or a neighbour, 1: vertex is on the meridian,
* 2: vertex is on the "right" side of the meridian and 3: vertex is
* on the "left" side of the meridian.
* @param p_vertex_neighbours_offset Contains how many neighbours are of
* type -1.
* @param p_vertex_cnt The number of vertices in the mesh.
*/
__global__ void SetPhiValues(float* p_phivalues_in, float* p_phivalues_out, bool* p_valid_phi_values,
const uint* p_vertex_neighbours, const uint* p_vertex_edge_offset_depth, const int* p_vertex_type,
const uint* p_vertex_neighbours_offset, uint p_vertex_cnt, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
if (!p_valid_phi_values[idx]) return;
uint begin = p_vertex_edge_offset_depth[idx] + p_vertex_neighbours_offset[idx];
uint end = p_vertex_edge_offset_depth[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
float count = end - begin;
float tmp = 0.0f;
// Add up the phivalues of the neighbouring vertices and increase the counter.
if (p_vertex_type[idx] == 0 || p_vertex_type[idx] == 2) {
for (uint i = begin; i < end; i++) {
tmp += p_phivalues_in[p_vertex_neighbours[i]];
}
} else {
/**
* Since vertices with the types -1 and 1 are remembered in the
             * p_valid_phi_values we can be sure that the vertices here are
* of type 3.
*/
for (uint i = begin; i < end; i++) {
if (p_vertex_type[p_vertex_neighbours[i]] != 1) {
tmp += p_phivalues_in[p_vertex_neighbours[i]];
} else {
tmp += 2.0f * C_PI;
}
}
}
float tmp_phi = (tmp / count) - p_phivalues_in[idx];
p_phivalues_out[idx] = p_phivalues_in[idx] + tmp_phi * 1.1f;
}
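/*
 * Minimal host-side sketch of the update rule above, added for illustration: both
 * SetPhiValues and SetZValues relax a vertex value towards the mean of its
 * neighbours and overshoot by the fixed factor 1.1 (successive over-relaxation).
 * The helper name is an assumption; only the arithmetic mirrors the kernels.
 */
static inline float OverRelaxedUpdate(float oldValue, float neighbourSum, float neighbourCnt) {
    float mean = neighbourSum / neighbourCnt;   // plain neighbourhood average
    return oldValue + (mean - oldValue) * 1.1f; // over-relax towards the average
}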
/**
* Compute the average z value in the neighbourhood and assign it to the
* current vertex.
*
* @param p_zvalues The z value of each vertex.
 * @param p_valid_z_values Remembers if a vertex is valid.
* @param p_vertex_neighbours The IDs of the neighbouring vertices.
* @param p_vertex_edge_offset_depth The number of edges per vertex.
* @param p_vertex_multiplicity The multiplicity factors of the vertex
* @param p_vertex_cnt The number of vertices in the mesh.
* @param p_edge_cnt The total number of edges of the mesh.
*/
__global__ void SetZValues(float* p_zvalues, bool* p_valid_z_values, const uint* p_vertex_neighbours,
const uint* p_vertex_edge_offset_depth, const uint* p_vertex_multiplicity, uint p_vertex_cnt, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
if (!p_valid_z_values[idx]) return;
uint begin = p_vertex_edge_offset_depth[idx];
uint end = p_vertex_edge_offset_depth[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
float tmp = 0.0f;
float multCount = 0.0f;
// Add up the zvalues of the neighbouring vertices and increase the counter.
for (uint i = begin; i < end; i++) {
uint mult = p_vertex_multiplicity[p_vertex_neighbours[i]];
for (uint j = 0; j < mult; j++) {
tmp += p_zvalues[p_vertex_neighbours[i]];
multCount += 1.0f;
}
}
float tmp_z = (tmp / multCount) - p_zvalues[idx];
p_zvalues[idx] = p_zvalues[idx] + tmp_z * 1.1f;
}
/**
* Sort the neighbour IDs ascending to the types of the neighbours.
*
* @param p_neighbour_ids The IDs of neighbouring vertices, will be sorted.
* @param p_neighbour_ids_offset Will contain how many neighbours have an ID of -1.
 * @param p_valid_z_values Remembers if a vertex is valid.
* @param p_vertex_edge_offset_depth The number of edges per vertex.
* @param p_vertex_type The type of the vertex: -1: Pole, 0: vertex
* is not on the meridian or a neighbour, 1: vertex is on the meridian,
* 2: vertex is on the "right" side of the meridian and 3: vertex is
* on the "left" side of the meridian.
* @param p_vertex_cnt The number of vertices in the mesh.
*/
__global__ void SortNeighbourIds(uint* p_neighbour_ids, uint* p_neighbour_ids_offset, bool* p_valid_z_values,
const uint* p_vertex_edge_offset_depth, const int* p_vertex_type, uint p_vertex_cnt, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
if (!p_valid_z_values[idx]) return;
uint begin = p_vertex_edge_offset_depth[idx];
uint end = p_vertex_edge_offset_depth[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
uint offset = p_neighbour_ids_offset[idx];
int type;
uint buffer;
// Sort the IDs according to the type.
for (uint i = begin; i < end; i++) {
type = p_vertex_type[p_neighbour_ids[i]];
if (type == -1) {
buffer = p_neighbour_ids[begin + offset];
p_neighbour_ids[begin + offset] = p_neighbour_ids[i];
p_neighbour_ids[i] = buffer;
offset++;
}
}
p_neighbour_ids_offset[idx] = offset;
}
/*
* SombreroKernels::~CUDAKernels
*/
SombreroKernels::~SombreroKernels(void) {}
/*
* SombreroKernels::CreatePhiValues
*/
bool SombreroKernels::CreatePhiValues(const float p_threshold, std::vector<float>& p_phi_values,
std::vector<bool> p_valid_phi_values, const std::vector<std::vector<Edge>>& p_vertex_edge_offset,
const std::vector<uint>& p_vertex_edge_offset_depth, const std::vector<int>& p_vertex_type) {
// Convert vertex edge offset to CUDA
uint vertex_cnt = static_cast<uint>(p_phi_values.size());
std::vector<Edge> cuda_vertex_offset;
cuda_vertex_offset.reserve(static_cast<size_t>(p_vertex_edge_offset_depth.back()) * 30);
for (const auto& offset : p_vertex_edge_offset) {
for (const auto& edge : offset) {
cuda_vertex_offset.push_back(edge);
}
}
// Store the vertex IDs of neighbouring vertices.
std::vector<uint> vertex_neighbours = std::vector<uint>(cuda_vertex_offset.size());
// Store the offset for neighbours with the type of -1.
std::vector<uint> vertex_neighbours_offset = std::vector<uint>(vertex_cnt, 0);
// Upload data and delete local copy.
thrust::device_vector<float> p_phi_values_one_d = p_phi_values;
thrust::device_vector<float> p_phi_values_two_d = p_phi_values;
thrust::device_vector<bool> p_valid_phi_values_d = p_valid_phi_values;
thrust::device_vector<Edge> cuda_vertex_offset_d = cuda_vertex_offset;
thrust::device_vector<uint> p_vertex_edge_offset_depth_d = p_vertex_edge_offset_depth;
thrust::device_vector<int> p_vertex_type_d = p_vertex_type;
thrust::device_vector<uint> vertex_neighbours_d = vertex_neighbours;
thrust::device_vector<uint> vertex_neighbours_offset_d = vertex_neighbours_offset;
uint edge_cnt = static_cast<uint>(cuda_vertex_offset.size());
cuda_vertex_offset.clear();
cuda_vertex_offset.shrink_to_fit();
vertex_neighbours.clear();
vertex_neighbours.shrink_to_fit();
vertex_neighbours_offset.clear();
vertex_neighbours_offset.shrink_to_fit();
// Get the neighbours of every vertex.
hipLaunchKernelGGL(( GetNeighbourIds), dim3(Grid(vertex_cnt, 256)), dim3(256), 0, 0, thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_valid_phi_values_d.data().get()),
thrust::raw_pointer_cast(cuda_vertex_offset_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(hipDeviceSynchronize());
// Get the offsets for the neighbours with the type of -1.
hipLaunchKernelGGL(( SortNeighbourIds), dim3(Grid(vertex_cnt, 256)), dim3(256), 0, 0, thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_offset_d.data().get()),
thrust::raw_pointer_cast(p_valid_phi_values_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()),
thrust::raw_pointer_cast(p_vertex_type_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(hipDeviceSynchronize());
// Perform iterations.
float diff = 2.0f * p_threshold;
size_t round = 0;
while (diff > p_threshold) {
hipLaunchKernelGGL(( SetPhiValues), dim3(Grid(vertex_cnt, 256)), dim3(256), 0, 0, thrust::raw_pointer_cast(p_phi_values_one_d.data().get()),
thrust::raw_pointer_cast(p_phi_values_two_d.data().get()),
thrust::raw_pointer_cast(p_valid_phi_values_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()),
thrust::raw_pointer_cast(p_vertex_type_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_offset_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( SetPhiValues), dim3(Grid(vertex_cnt, 256)), dim3(256), 0, 0, thrust::raw_pointer_cast(p_phi_values_two_d.data().get()),
thrust::raw_pointer_cast(p_phi_values_one_d.data().get()),
thrust::raw_pointer_cast(p_valid_phi_values_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()),
thrust::raw_pointer_cast(p_vertex_type_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_offset_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(hipDeviceSynchronize());
// Check the difference between the two.
if (round % 5000 == 0) {
float first_sum = thrust::reduce(p_phi_values_two_d.begin(), p_phi_values_two_d.end());
float second_sum = thrust::reduce(p_phi_values_one_d.begin(), p_phi_values_one_d.end());
diff = abs(second_sum - first_sum);
}
round++;
}
thrust::copy(p_phi_values_one_d.begin(), p_phi_values_one_d.end(), p_phi_values.begin());
return true;
}
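/*
 * Usage sketch added for illustration (all inputs below are assumptions): CreatePhiValues
 * expects the per-vertex edge offsets, their depth prefix table and the vertex types
 * produced elsewhere in the Sombrero pipeline, and relaxes phi until the summed change
 * between check points drops below the given threshold.
 *
 * std::vector<float> phi(vertexCnt, 0.0f);
 * std::vector<bool> validPhi(vertexCnt, true); // poles/meridian vertices set to false
 * kernels.CreatePhiValues(0.0001f, phi, validPhi, vertexEdgeOffsets, offsetDepth, vertexTypes);
 */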
/*
* SombreroKernels::CreateZValues
*/
bool SombreroKernels::CreateZValues(const uint p_iterations, std::vector<float>& p_zvalues,
std::vector<bool> p_valid_z_values, const std::vector<std::vector<Edge>>& p_vertex_edge_offset,
const std::vector<uint>& p_vertex_edge_offset_depth, const std::vector<uint>& p_vertex_multiplicity) {
// Convert vertex edge offset to CUDA
uint vertex_cnt = static_cast<uint>(p_zvalues.size());
std::vector<SombreroKernels::Edge> cuda_vertex_offset;
cuda_vertex_offset.reserve(static_cast<size_t>(p_vertex_edge_offset_depth.back()) * 30);
for (const auto& offset : p_vertex_edge_offset) {
for (const auto& edge : offset) {
cuda_vertex_offset.push_back(edge);
}
}
// Store the vertex IDs of neighbouring vertices.
std::vector<uint> vertex_neighbours = std::vector<uint>(cuda_vertex_offset.size());
// Upload data and delete local copy.
thrust::device_vector<float> p_zvalues_d = p_zvalues;
thrust::device_vector<bool> p_valid_z_values_d = p_valid_z_values;
thrust::device_vector<SombreroKernels::Edge> cuda_vertex_offset_d = cuda_vertex_offset;
thrust::device_vector<uint> p_vertex_edge_offset_depth_d = p_vertex_edge_offset_depth;
thrust::device_vector<uint> vertex_neighbours_d = vertex_neighbours;
thrust::device_vector<uint> p_vertex_multiplicity_d = p_vertex_multiplicity;
uint edge_cnt = static_cast<uint>(cuda_vertex_offset.size());
cuda_vertex_offset.clear();
cuda_vertex_offset.shrink_to_fit();
vertex_neighbours.clear();
vertex_neighbours.shrink_to_fit();
// Get the neighbours of every vertex.
hipLaunchKernelGGL(( GetNeighbourIds), dim3(Grid(vertex_cnt, 256)), dim3(256), 0, 0, thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_valid_z_values_d.data().get()),
thrust::raw_pointer_cast(cuda_vertex_offset_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(hipDeviceSynchronize());
// Perform iterations.
for (uint i = 0; i < p_iterations; i++) {
hipLaunchKernelGGL(( SetZValues), dim3(Grid(vertex_cnt, 256)), dim3(256), 0, 0, thrust::raw_pointer_cast(p_zvalues_d.data().get()),
thrust::raw_pointer_cast(p_valid_z_values_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()),
thrust::raw_pointer_cast(p_vertex_multiplicity_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(hipDeviceSynchronize());
}
thrust::copy(p_zvalues_d.begin(), p_zvalues_d.end(), p_zvalues.begin());
return true;
}
/*
* SombreroKernels::SombreroKernels
*/
SombreroKernels::SombreroKernels(void) {}
/*
* SombreroKernels::SortEdges
*/
bool SombreroKernels::SortEdges(std::vector<Edge>& p_edges, const uint p_id) {
// Upload the data.
thrust::device_vector<Edge> edges_d = p_edges;
// Sort the data.
if (p_id == 0) {
thrust::sort(edges_d.begin(), edges_d.end(), VertexID0Cmp());
} else if (p_id == 1) {
thrust::sort(edges_d.begin(), edges_d.end(), VertexID1Cmp());
} else {
return false;
}
// Download the data.
thrust::copy(edges_d.begin(), edges_d.end(), p_edges.begin());
return true;
}
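/*
 * Usage sketch added for illustration (variable names are assumptions): the edge list
 * is typically sorted once by vertex_id_0 and once by vertex_id_1 before the per-vertex
 * offset tables are built with SetVertexEdgeOffset.
 *
 * std::vector<SombreroKernels::Edge> edges = buildEdgeList(mesh); // hypothetical helper
 * SombreroKernels kernels;
 * kernels.SortEdges(edges, 0); // ascending by vertex_id_0
 * kernels.SortEdges(edges, 1); // ascending by vertex_id_1
 */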
| dec10c41b2cf84889cbbd2770ac967a5689dd20b.cu | /*
* CUDAKernels.cu
* Copyright (C) 2009-2017 by MegaMol Team
* Alle Rechte vorbehalten.
*/
#include "SombreroKernels.cuh"
using namespace megamol;
using namespace megamol::protein_cuda;
__constant__ float C_PI = 3.141592653589f;
/**
* CUDA symbol for the face grouping will be false if no
* further changes where made to the face IDs.
*/
__device__ bool changes_d;
/**
* Smaller operator for edges.
*/
__host__ __device__ bool operator<(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.vertex_id_1 < rhs.vertex_id_1;
}
/**
* Sort edges ascending to the face_id_0.
*/
struct FaceID0Cmp {
__host__ __device__ bool operator()(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.face_id_0 < rhs.face_id_0;
}
};
/**
* Sort edges ascending to the face_id_1.
*/
struct FaceID1Cmp {
__host__ __device__ bool operator()(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.face_id_1 < rhs.face_id_1;
}
};
/**
* Sort edges ascending to the vertex_id_0.
*/
struct VertexID0Cmp {
__host__ __device__ bool operator()(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.vertex_id_0 < rhs.vertex_id_0;
}
};
/**
* Sort edges ascending to the vertex_id_1.
*/
struct VertexID1Cmp {
__host__ __device__ bool operator()(const SombreroKernels::Edge& lhs, const SombreroKernels::Edge& rhs) {
return lhs.vertex_id_1 < rhs.vertex_id_1;
}
};
/**
 * Get the thread index based on the current CUDA grid dimensions.
*
* @return Returns the thread index based on the current CUDA grid
* dimensions.
*/
__device__ uint GetCurThreadIndex() {
return __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) + threadIdx.x;
}
/**
 * Get the IDs of each neighbour for every vertex.
*
* @param p_neighbour_ids Will contain the IDs of neighbouring vertices
 * @param p_valid_z_values Remembers if a vertex is valid.
* @param p_vertex_edge_offset The edges that contain the vertex.
* @param p_vertex_edge_offset_depth The number of edges per vertex.
* @param p_vertex_cnt The number of vertices in the mesh.
*/
__global__ void GetNeighbourIds(uint* p_neighbour_ids, bool* p_valid_z_values,
const SombreroKernels::Edge* p_vertex_edge_offset, const uint* p_vertex_edge_offset_depth, uint p_vertex_cnt,
uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
if (!p_valid_z_values[idx]) return;
uint begin = p_vertex_edge_offset_depth[idx];
uint end = p_vertex_edge_offset_depth[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
uint neighbour_id;
// Find the IDs of neighbouring vertices.
for (uint i = begin; i < end; i++) {
SombreroKernels::Edge cur = p_vertex_edge_offset[i];
if (cur.vertex_id_0 == idx)
neighbour_id = cur.vertex_id_1;
else
neighbour_id = cur.vertex_id_0;
p_neighbour_ids[i] = neighbour_id;
}
}
/**
* Create the edges of the mesh based on the faces. For each
* face three edges are created. Each edge will exist twice
 * facing opposite directions.
*
* @param p_faces The list of faces in the mesh. Each face
* consists of three vertex IDs.
* @param p_edge Will contain the edges of the mesh.
* @param p_face_cnt The number of faces in the mesh.
*/
__global__ void InitEdges(const uint3* p_faces, SombreroKernels::Edge* p_edges, uint p_face_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_face_cnt) return;
// First edge.
{
SombreroKernels::Edge cur = p_edges[idx * 3];
cur.face_id_0 = idx;
cur.face_id_1 = -1;
cur.vertex_id_0 = p_faces[idx].x;
cur.vertex_id_1 = p_faces[idx].y;
cur.edge_id = idx * 3;
p_edges[idx * 3] = cur;
}
// Second edge.
{
SombreroKernels::Edge cur = p_edges[idx * 3 + 1];
cur.face_id_0 = idx;
cur.face_id_1 = -1;
cur.vertex_id_0 = p_faces[idx].y;
cur.vertex_id_1 = p_faces[idx].z;
cur.edge_id = idx * 3 + 1;
p_edges[idx * 3 + 1] = cur;
}
// Third edge.
{
SombreroKernels::Edge cur = p_edges[idx * 3 + 2];
cur.face_id_0 = idx;
cur.face_id_1 = -1;
cur.vertex_id_0 = p_faces[idx].z;
cur.vertex_id_1 = p_faces[idx].x;
cur.edge_id = idx * 3 + 2;
p_edges[idx * 3 + 2] = cur;
}
}
/**
* Match the edges so that each edge has a full set of faces
* to which it belongs. Therefore search the sorted edges
 * for the same edge that faces the opposite direction. The
* sorted edges are sorted ascending to the vertex_id_1.
*
* @param p_edges The list of edges of the mesh.
* @param p_sorted_edges The sorted edges of the mesh.
* @param p_edge_offset The offset that points to the index
* in the p_sorted_edges for which vertex_id_1 == vertex_id_0
* of the current edge.
* @param p_edge_cnt The number of edges in the mesh (x2).
*/
__global__ void MatchEdges(
SombreroKernels::Edge* p_edges, SombreroKernels::Edge* p_sorted_edges, uint* p_edge_offset, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_edge_cnt) return;
// Get current edge and check if it is already matched.
SombreroKernels::Edge cur = p_edges[idx];
if (cur.face_id_1 != -1) return;
    // Find the same edge facing in the opposite direction.
uint begin = p_edge_offset[cur.vertex_id_0];
uint end = p_edge_offset[cur.vertex_id_0 + 1];
for (uint i = begin; i < end; i++) {
uint id = p_sorted_edges[i].edge_id;
if (i == idx) continue;
if (p_edges[id].face_id_1 != -1) continue;
if (cur.vertex_id_0 == p_edges[id].vertex_id_1 && cur.vertex_id_1 == p_edges[id].vertex_id_0) {
// Found the edge.
cur.face_id_1 = p_edges[id].face_id_0;
cur.opposite_edge_id = id;
p_edges[id].face_id_1 = cur.face_id_0;
p_edges[id].opposite_edge_id = cur.edge_id;
p_edges[idx] = cur;
break;
}
}
}
/**
* Find all edges that belong to a certain face by looping over the edges
* of the mesh.
*
* @param p_face_edge_offset Will contain the edges that belong to a face.
* @param p_face_id_0_offset Contains the edges sorted ascending for face_id_0
* @param p_face_id_1_offset Contains the edges sorted ascending for face_id_1
* @param depth The maximum number of edges per face.
* @param p_face_cnt The number of faces in the mesh.
*/
__global__ void SetFaceEdgeOffset(SombreroKernels::Edge* p_face_edge_offset, SombreroKernels::Edge* p_face_id_0_offset,
SombreroKernels::Edge* p_face_id_1_offset, uint depth, uint p_face_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_face_cnt) return;
// Find edges that belong to the face.
uint cur_depth = idx * depth;
uint begin = idx * 3;
uint end = (idx + 1) * 3;
for (uint i = begin; i < end; i++) {
p_face_edge_offset[cur_depth++] = p_face_id_0_offset[i];
p_face_edge_offset[cur_depth++] = p_face_id_1_offset[i];
}
}
/**
* Find all edges that belong to a certain vertex by looping over the edges
* of the mesh.
*
* @param p_vertex_edge_offset Will contain the edges that belong to a vertex.
* @param p_vertex_id_0_sorted Contains the edges sorted ascending for vertex_id_0
* @param p_vertex_id_1_sorted Contains the edges sorted ascending for vertex_id_1
* @param p_vertex_id_0_offset Points to the first edge in p_vertex_id_0_sorted
* with the vertex_id_0 == idx
* @param p_vertex_id_1_offset Points to the first edge in p_vertex_id_1_sorted
* with the vertex_id_1 == idx
* @param depth The maximum number of edges per vertex.
* @param p_vertex_cnt The number of vertices in the mesh.
*/
__global__ void SetVertexEdgeOffset(SombreroKernels::Edge* p_vertex_edge_offset,
SombreroKernels::Edge* p_vertex_id_0_sorted, SombreroKernels::Edge* p_vertex_id_1_sorted,
uint* p_vertex_id_0_offset, uint* p_vertex_id_1_offset, uint* depth, uint p_vertex_cnt, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
// Find edges that belong to the vertex.
uint cur_depth = depth[idx];
// Edges with vertex_id_0 == idx
uint begin = p_vertex_id_0_offset[idx];
uint end = p_vertex_id_0_offset[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
for (uint i = begin; i < end; i++) {
p_vertex_edge_offset[cur_depth++] = p_vertex_id_0_sorted[i];
}
// Edges with vertex_id_1 == idx
begin = p_vertex_id_1_offset[idx];
end = p_vertex_id_1_offset[idx + 1];
for (uint i = begin; i < end; i++) {
p_vertex_edge_offset[cur_depth++] = p_vertex_id_1_sorted[i];
}
}
/**
* Compute the average phi value in the neighbourhood and assign it to
 * the current vertex. Take care of special cases around the boundary
 * meridian by assigning the vertices on the meridian the phi value of
* 0 if they are approached from the "right" and a value of 2*pi if they
* are approached from the "left".
*
* @param p_phivalues_in The phi values of the last iteration (input)
* @param p_phivalues_out The phi value of the current iteration (output)
 * @param p_valid_phi_values Remembers if a vertex is valid.
* @param p_vertex_neighbours The IDs of the neighbouring vertices.
* @param p_vertex_edge_offset_depth The number of edges per vertex.
* @param p_vertex_type The type of the vertex: -1: Pole, 0: vertex
* is not on the meridian or a neighbour, 1: vertex is on the meridian,
* 2: vertex is on the "right" side of the meridian and 3: vertex is
* on the "left" side of the meridian.
* @param p_vertex_neighbours_offset Contains how many neighbours are of
* type -1.
* @param p_vertex_cnt The number of vertices in the mesh.
*/
__global__ void SetPhiValues(float* p_phivalues_in, float* p_phivalues_out, bool* p_valid_phi_values,
const uint* p_vertex_neighbours, const uint* p_vertex_edge_offset_depth, const int* p_vertex_type,
const uint* p_vertex_neighbours_offset, uint p_vertex_cnt, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
if (!p_valid_phi_values[idx]) return;
uint begin = p_vertex_edge_offset_depth[idx] + p_vertex_neighbours_offset[idx];
uint end = p_vertex_edge_offset_depth[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
float count = end - begin;
float tmp = 0.0f;
// Add up the phivalues of the neighbouring vertices and increase the counter.
if (p_vertex_type[idx] == 0 || p_vertex_type[idx] == 2) {
for (uint i = begin; i < end; i++) {
tmp += p_phivalues_in[p_vertex_neighbours[i]];
}
} else {
/**
* Since vertices with the types -1 and 1 are remembered in the
             * p_valid_phi_values we can be sure that the vertices here are
* of type 3.
*/
for (uint i = begin; i < end; i++) {
if (p_vertex_type[p_vertex_neighbours[i]] != 1) {
tmp += p_phivalues_in[p_vertex_neighbours[i]];
} else {
tmp += 2.0f * C_PI;
}
}
}
float tmp_phi = (tmp / count) - p_phivalues_in[idx];
p_phivalues_out[idx] = p_phivalues_in[idx] + tmp_phi * 1.1f;
}
/**
* Compute the average z value in the neighbourhood and assign it to the
* current vertex.
*
* @param p_zvalues The z value of each vertex.
 * @param p_valid_z_values Remembers if a vertex is valid.
* @param p_vertex_neighbours The IDs of the neighbouring vertices.
* @param p_vertex_edge_offset_depth The number of edges per vertex.
* @param p_vertex_multiplicity The multiplicity factors of the vertex
* @param p_vertex_cnt The number of vertices in the mesh.
* @param p_edge_cnt The total number of edges of the mesh.
*/
__global__ void SetZValues(float* p_zvalues, bool* p_valid_z_values, const uint* p_vertex_neighbours,
const uint* p_vertex_edge_offset_depth, const uint* p_vertex_multiplicity, uint p_vertex_cnt, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
if (!p_valid_z_values[idx]) return;
uint begin = p_vertex_edge_offset_depth[idx];
uint end = p_vertex_edge_offset_depth[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
float tmp = 0.0f;
float multCount = 0.0f;
// Add up the zvalues of the neighbouring vertices and increase the counter.
for (uint i = begin; i < end; i++) {
uint mult = p_vertex_multiplicity[p_vertex_neighbours[i]];
for (uint j = 0; j < mult; j++) {
tmp += p_zvalues[p_vertex_neighbours[i]];
multCount += 1.0f;
}
}
float tmp_z = (tmp / multCount) - p_zvalues[idx];
p_zvalues[idx] = p_zvalues[idx] + tmp_z * 1.1f;
}
/**
* Sort the neighbour IDs ascending to the types of the neighbours.
*
* @param p_neighbour_ids The IDs of neighbouring vertices, will be sorted.
* @param p_neighbour_ids_offset Will contain how many neighbours have an ID of -1.
 * @param p_valid_z_values Remembers if a vertex is valid.
* @param p_vertex_edge_offset_depth The number of edges per vertex.
* @param p_vertex_type The type of the vertex: -1: Pole, 0: vertex
* is not on the meridian or a neighbour, 1: vertex is on the meridian,
* 2: vertex is on the "right" side of the meridian and 3: vertex is
* on the "left" side of the meridian.
* @param p_vertex_cnt The number of vertices in the mesh.
*/
__global__ void SortNeighbourIds(uint* p_neighbour_ids, uint* p_neighbour_ids_offset, bool* p_valid_z_values,
const uint* p_vertex_edge_offset_depth, const int* p_vertex_type, uint p_vertex_cnt, uint p_edge_cnt) {
const uint idx = GetCurThreadIndex();
if (idx >= p_vertex_cnt) return;
if (!p_valid_z_values[idx]) return;
uint begin = p_vertex_edge_offset_depth[idx];
uint end = p_vertex_edge_offset_depth[idx + 1];
if (idx == p_vertex_cnt - 1) {
end = p_edge_cnt; // necessary for the last vertex
}
uint offset = p_neighbour_ids_offset[idx];
int type;
uint buffer;
// Sort the IDs according to the type.
for (uint i = begin; i < end; i++) {
type = p_vertex_type[p_neighbour_ids[i]];
if (type == -1) {
buffer = p_neighbour_ids[begin + offset];
p_neighbour_ids[begin + offset] = p_neighbour_ids[i];
p_neighbour_ids[i] = buffer;
offset++;
}
}
p_neighbour_ids_offset[idx] = offset;
}
/*
* SombreroKernels::~CUDAKernels
*/
SombreroKernels::~SombreroKernels(void) {}
/*
* SombreroKernels::CreatePhiValues
*/
bool SombreroKernels::CreatePhiValues(const float p_threshold, std::vector<float>& p_phi_values,
std::vector<bool> p_valid_phi_values, const std::vector<std::vector<Edge>>& p_vertex_edge_offset,
const std::vector<uint>& p_vertex_edge_offset_depth, const std::vector<int>& p_vertex_type) {
// Convert vertex edge offset to CUDA
uint vertex_cnt = static_cast<uint>(p_phi_values.size());
std::vector<Edge> cuda_vertex_offset;
cuda_vertex_offset.reserve(static_cast<size_t>(p_vertex_edge_offset_depth.back()) * 30);
for (const auto& offset : p_vertex_edge_offset) {
for (const auto& edge : offset) {
cuda_vertex_offset.push_back(edge);
}
}
// Store the vertex IDs of neighbouring vertices.
std::vector<uint> vertex_neighbours = std::vector<uint>(cuda_vertex_offset.size());
// Store the offset for neighbours with the type of -1.
std::vector<uint> vertex_neighbours_offset = std::vector<uint>(vertex_cnt, 0);
// Upload data and delete local copy.
thrust::device_vector<float> p_phi_values_one_d = p_phi_values;
thrust::device_vector<float> p_phi_values_two_d = p_phi_values;
thrust::device_vector<bool> p_valid_phi_values_d = p_valid_phi_values;
thrust::device_vector<Edge> cuda_vertex_offset_d = cuda_vertex_offset;
thrust::device_vector<uint> p_vertex_edge_offset_depth_d = p_vertex_edge_offset_depth;
thrust::device_vector<int> p_vertex_type_d = p_vertex_type;
thrust::device_vector<uint> vertex_neighbours_d = vertex_neighbours;
thrust::device_vector<uint> vertex_neighbours_offset_d = vertex_neighbours_offset;
uint edge_cnt = static_cast<uint>(cuda_vertex_offset.size());
cuda_vertex_offset.clear();
cuda_vertex_offset.shrink_to_fit();
vertex_neighbours.clear();
vertex_neighbours.shrink_to_fit();
vertex_neighbours_offset.clear();
vertex_neighbours_offset.shrink_to_fit();
// Get the neighbours of every vertex.
GetNeighbourIds<<<Grid(vertex_cnt, 256), 256>>>(thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_valid_phi_values_d.data().get()),
thrust::raw_pointer_cast(cuda_vertex_offset_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(cudaDeviceSynchronize());
// Get the offsets for the neighbours with the type of -1.
SortNeighbourIds<<<Grid(vertex_cnt, 256), 256>>>(thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_offset_d.data().get()),
thrust::raw_pointer_cast(p_valid_phi_values_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()),
thrust::raw_pointer_cast(p_vertex_type_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(cudaDeviceSynchronize());
// Perform iterations.
float diff = 2.0f * p_threshold;
size_t round = 0;
while (diff > p_threshold) {
SetPhiValues<<<Grid(vertex_cnt, 256), 256>>>(thrust::raw_pointer_cast(p_phi_values_one_d.data().get()),
thrust::raw_pointer_cast(p_phi_values_two_d.data().get()),
thrust::raw_pointer_cast(p_valid_phi_values_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()),
thrust::raw_pointer_cast(p_vertex_type_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_offset_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(cudaDeviceSynchronize());
SetPhiValues<<<Grid(vertex_cnt, 256), 256>>>(thrust::raw_pointer_cast(p_phi_values_two_d.data().get()),
thrust::raw_pointer_cast(p_phi_values_one_d.data().get()),
thrust::raw_pointer_cast(p_valid_phi_values_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()),
thrust::raw_pointer_cast(p_vertex_type_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_offset_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(cudaDeviceSynchronize());
// Check the difference between the two.
if (round % 5000 == 0) {
float first_sum = thrust::reduce(p_phi_values_two_d.begin(), p_phi_values_two_d.end());
float second_sum = thrust::reduce(p_phi_values_one_d.begin(), p_phi_values_one_d.end());
diff = abs(second_sum - first_sum);
}
round++;
}
thrust::copy(p_phi_values_one_d.begin(), p_phi_values_one_d.end(), p_phi_values.begin());
return true;
}
/*
* SombreroKernels::CreateZValues
*/
bool SombreroKernels::CreateZValues(const uint p_iterations, std::vector<float>& p_zvalues,
std::vector<bool> p_valid_z_values, const std::vector<std::vector<Edge>>& p_vertex_edge_offset,
const std::vector<uint>& p_vertex_edge_offset_depth, const std::vector<uint>& p_vertex_multiplicity) {
// Convert vertex edge offset to CUDA
uint vertex_cnt = static_cast<uint>(p_zvalues.size());
std::vector<SombreroKernels::Edge> cuda_vertex_offset;
cuda_vertex_offset.reserve(static_cast<size_t>(p_vertex_edge_offset_depth.back()) * 30);
for (const auto& offset : p_vertex_edge_offset) {
for (const auto& edge : offset) {
cuda_vertex_offset.push_back(edge);
}
}
// Store the vertex IDs of neighbouring vertices.
std::vector<uint> vertex_neighbours = std::vector<uint>(cuda_vertex_offset.size());
// Upload data and delete local copy.
thrust::device_vector<float> p_zvalues_d = p_zvalues;
thrust::device_vector<bool> p_valid_z_values_d = p_valid_z_values;
thrust::device_vector<SombreroKernels::Edge> cuda_vertex_offset_d = cuda_vertex_offset;
thrust::device_vector<uint> p_vertex_edge_offset_depth_d = p_vertex_edge_offset_depth;
thrust::device_vector<uint> vertex_neighbours_d = vertex_neighbours;
thrust::device_vector<uint> p_vertex_multiplicity_d = p_vertex_multiplicity;
uint edge_cnt = static_cast<uint>(cuda_vertex_offset.size());
cuda_vertex_offset.clear();
cuda_vertex_offset.shrink_to_fit();
vertex_neighbours.clear();
vertex_neighbours.shrink_to_fit();
// Get the neighbours of every vertex.
GetNeighbourIds<<<Grid(vertex_cnt, 256), 256>>>(thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_valid_z_values_d.data().get()),
thrust::raw_pointer_cast(cuda_vertex_offset_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(cudaDeviceSynchronize());
// Perform iterations.
for (uint i = 0; i < p_iterations; i++) {
SetZValues<<<Grid(vertex_cnt, 256), 256>>>(thrust::raw_pointer_cast(p_zvalues_d.data().get()),
thrust::raw_pointer_cast(p_valid_z_values_d.data().get()),
thrust::raw_pointer_cast(vertex_neighbours_d.data().get()),
thrust::raw_pointer_cast(p_vertex_edge_offset_depth_d.data().get()),
thrust::raw_pointer_cast(p_vertex_multiplicity_d.data().get()), vertex_cnt, edge_cnt);
checkCudaErrors(cudaDeviceSynchronize());
}
thrust::copy(p_zvalues_d.begin(), p_zvalues_d.end(), p_zvalues.begin());
return true;
}
/*
* SombreroKernels::SombreroKernels
*/
SombreroKernels::SombreroKernels(void) {}
/*
* SombreroKernels::SortEdges
*/
bool SombreroKernels::SortEdges(std::vector<Edge>& p_edges, const uint p_id) {
// Upload the data.
thrust::device_vector<Edge> edges_d = p_edges;
// Sort the data.
if (p_id == 0) {
thrust::sort(edges_d.begin(), edges_d.end(), VertexID0Cmp());
} else if (p_id == 1) {
thrust::sort(edges_d.begin(), edges_d.end(), VertexID1Cmp());
} else {
return false;
}
// Download the data.
thrust::copy(edges_d.begin(), edges_d.end(), p_edges.begin());
return true;
}
|
d5b86fa2e6d6da2d253a33b691566a9c2297ee59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../include/cudaconv2.cuh"
#include "../../nvmatrix/include/nvmatrix_kernels.cuh"
#define LO16(x) ((x) & 0x0000FFFF)
#define HI16(x) ((x) >> 16)
#define WA_LOOP(r) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
_Pragma("unroll") \
for (int f = 0; f < filtersPerThread; f++) { \
prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \
} \
}
#define WA_LOOP2(r) _Pragma("unroll") \
for (int f = 0; f < filtersPerThread; f++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \
} \
}
#define WA_IMLOAD(r) imPreload[r] = im[(r) * B_X * B_Y / preloadCases * imgPixels * imgStride];
#define WA_IMLOAD_2(r) imPreload[r] = images[imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride];
#define WA_IMLOAD_TX(r) imPreload[r] = tex1Dfetch<float>(images, imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride);
#define WA_HALOAD(r) haPreload[r] = ha[(r) * B_X * B_Y / preloadCases * numImages * numModules];
#define WA_HALOAD_2(r) haPreload[r] = hidActs[hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules];
#define WA_HALOAD_TX(r) haPreload[r] = tex1Dfetch<float>(hidActs, hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules);
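/*
 * Added note for clarity: for one preloaded case r, WA_LOOP(r) accumulates the outer
 * product of the shared image tile and the shared hidden-activation tile,
 * prod[f][c] += shImages[threadIdx.y + c * B_Y][r] * shHidActs[threadIdx.x + f * B_X][r],
 * and WA_LOOP2(r) performs the same accumulation with the f/c loop order swapped.
 * The WA_IMLOAD and WA_HALOAD variants prefetch the next image and hidAct values into registers.
 */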
__device__ __forceinline__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
const int my, const int mx, const int paddingStart, const int numModulesX, const int moduleStride,
const int blockPixelY, const int blockPixelX, const int imgSizeX,
const int imgStride, int& pixIdx, int& m) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image
const int pxX = imgLoadModPosX + blockPixelX;
pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
m = my * numModulesX + mx;
}
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_c_kepler(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / (B_X*filtersPerThread);
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
images += loadX;
hidActs += blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
__shared__ int pxIdxes[B_Y*pixelsPerThread];
//__shared__ bool isPxInImage[B_Y*pixelsPerThread];
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
__syncthreads();
if (tidx < B_Y * pixelsPerThread) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
int pxY = (imgLoadModPosY + (blockPixelOffset + tidx) / filterSize);
int pxX = (imgLoadModPosX + (blockPixelOffset + tidx) % filterSize);
int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
//isPxInImage[tidx] = ;
}
__syncthreads();
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X*filtersPerThread) {
shHidActs[loadY+y][loadX]= hidActs[caseIdx + y * numImages * numModules + m * numImages];
}
}
}
#pragma unroll
for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) {
//if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things a bit
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
                 * This will load some imgGrads from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0;
}
}
}
}
//}
__syncthreads();
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int p = 0; p < pixelCache; p++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
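/*
 * Launch-geometry sketch added for illustration (this is not the original dispatcher,
 * which lives elsewhere in cudaconv2): it derives a grid consistent with the indexing
 * documented above, where blockIdx.x packs the module batch and the filter batch and
 * blockIdx.y walks over pixel batches of B_Y * pixelsPerThread. The function name is
 * an assumption.
 */
static inline dim3 exampleWeightActsGridC(int numFilters, int numModules, int filterPixels,
                                          int partialSum, int B_X, int B_Y,
                                          int filtersPerThread, int pixelsPerThread) {
    int filterBlocksPerModule = numFilters / (B_X * filtersPerThread);
    int moduleChunks = numModules / partialSum; // one output chunk per partialSum modules
    int pixelBlocks = (filterPixels + B_Y * pixelsPerThread - 1) / (B_Y * pixelsPerThread);
    return dim3(moduleChunks * filterBlocksPerModule, pixelBlocks, 1);
}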
/*
* Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines color
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines color batch of B_Y * colorsPerThread
* blockIdx.z determines pixel in filter
* NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will
* fail for filters >= 256*256. I'm assuming I won't ever use such large filters.
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
* B_X * B_Y must be divisible by preloadCases
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__global__ void conv_weight_acts_mc_mf_kepler(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
hidActs +=
blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
//if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] = 0;
}
}
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image
const int pxX = imgLoadModPosX + blockPixelX;
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
// Checking this condition actually makes things faster ... :/
// So I've removed the !checkCaseBounds flag and just check it all the time.
if (caseIdx + loadX < numImages) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
if (loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) {
shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx];
}
}
}
if (loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages];
}
}
}
} else {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) {
shImgLoad[(y) * preloadCases] = 0;
}
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = 0;
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
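/*
* Illustrative sketch of the launch geometry implied by the comment above: a
* hypothetical helper, introduced only for illustration, that assembles the grid
* for conv_weight_acts_mc_mf_kepler. blockIdx.x enumerates (module batch, filter
* block) pairs, blockIdx.y enumerates color blocks, blockIdx.z enumerates filter
* pixels, and each block runs dim3(B_X, B_Y) threads. Assumes numModules is
* divisible by partialSum and the filter/color divisibility constraints above hold.
*/
static inline dim3 waMcMfKeplerGridSketch(int numModules, int partialSum,
                                          int numFilters, int numFilterColors,
                                          int filterPixels,
                                          int B_X, int B_Y,
                                          int filtersPerThread, int colorsPerThread) {
    const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
    return dim3((numModules / partialSum) * numFilterBlocks,
                numFilterColors / (B_Y * colorsPerThread),
                filterPixels); // filterPixels must stay below 2^16 (see NOTE above)
}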
/*
* Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines color
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module chunk of sumWidth x sumWidth modules
* blockIdx.y determines color batch of B_Y * colorsPerThread
* blockIdx.z determines pixel in filter
* NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will
* fail for filters >= 256*256. I'm assuming I won't ever use such large filters.
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModuleChunksY*numModuleChunksX, numFilterColors, filterPixels, numFilters),
*          where numModuleChunksX = DIVUP(numModulesX, sumWidth) and likewise for Y
* B_X * B_Y must be divisible by preloadCases
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__global__ void conv_weight_acts_mc_mf_kepler_sw(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
hidActs +=
blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
//if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] = 0;
}
}
/*
* Note: iterating this way is about 1% slower and uses a few more registers than iterating
* over the modules linearly. But it's consistent with the preload routines,
* so I'm using it.
*/
for (int my = mStartY; my < mEndY; my++) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image
for (int mx = mStartX; mx < mEndX; mx++) {
const int m = my * numModulesX + mx;
const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxX = imgLoadModPosX + blockPixelX;
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
// Checking this condition actually makes things faster ... :/
// So I've removed the !checkCaseBounds flag and just check it all the time.
if (caseIdx + loadX < numImages) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
if (loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) {
shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx];
}
}
}
if (loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages];
}
}
}
} else {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) {
shImgLoad[(y) * preloadCases] = 0;
}
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = 0;
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
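/*
* Illustrative sketch for the sumWidth variant above: the module dimension of the
* grid counts sumWidth x sumWidth chunks of the module grid rather than
* partialSum-sized batches. The helper name is hypothetical, introduced only for
* illustration.
*/
static inline dim3 waMcMfKeplerSwGridSketch(int numModulesY, int numModulesX, int sumWidth,
                                            int numFilters, int numFilterColors,
                                            int filterPixels,
                                            int B_X, int B_Y,
                                            int filtersPerThread, int colorsPerThread) {
    const int numModuleChunks = DIVUP(numModulesY, sumWidth) * DIVUP(numModulesX, sumWidth);
    return dim3(numModuleChunks * (numFilters / (B_X * filtersPerThread)),
                numFilterColors / (B_Y * colorsPerThread),
                filterPixels);
}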
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module chunk of sumWidth x sumWidth modules
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModuleChunksY*numModuleChunksX, numColors, filterPixels, numFilters),
*          where numModuleChunksX = DIVUP(numModulesX, sumWidth) and likewise for Y
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* sumWidth determines the module chunk each block accumulates over (edge chunks may be smaller)
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_c_kepler_sw(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
images += loadX;
hidActs += blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
+ loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
for (int my = mStartY; my < mEndY; my++) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
const int m = my * numModulesX + mx;
__syncthreads();
const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
int pxY = (imgLoadModPosY + fYOff);
int pxX = (imgLoadModPosX + fXOff);
int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread;
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) {
shHidActs[loadY+y][loadX]= hidActs[caseIdx + fIdx * numImages * numModules + m * numImages];
}
}
} else {
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread;
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) {
shHidActs[loadY+y][loadX] = 0;
}
}
}
#pragma unroll
for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) {
//if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things a bit
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0;
}
}
}
}
//}
__syncthreads();
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int p = 0; p < pixelCache; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i];
}
}
}
}
__syncthreads();
}
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
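/*
* Illustrative sketch for the few-color kernel above: blockIdx.y walks pixel
* batches of B_Y * pixelsPerThread instead of color batches and there is no
* blockIdx.z. The helper name is hypothetical, introduced only for illustration.
*/
static inline dim3 waCKeplerSwGridSketch(int numModulesY, int numModulesX, int sumWidth,
                                         int numFilters, int filterPixels,
                                         int B_X, int B_Y,
                                         int filtersPerThread, int pixelsPerThread) {
    const int numModuleChunks = DIVUP(numModulesY, sumWidth) * DIVUP(numModulesX, sumWidth);
    return dim3(numModuleChunks * (numFilters / (B_X * filtersPerThread)),
                DIVUP(filterPixels, B_Y * pixelsPerThread));
}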
#define WA_C3_LOOP(pp, c) _Pragma("unroll") \
for (int i = 0; i < preloadCases; i++) { \
_Pragma("unroll") \
for (int p = 0; p < pixelCache; p++) { \
_Pragma("unroll") \
for (int f = 0; f < filtersPerThread; f++) { \
prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \
} \
} \
}
#define WA_C3_LOOP2(pp) _Pragma("unroll") \
for (int p = 0; p < pixelCache; p++) { \
_Pragma("unroll") \
for (int i = 0; i < preloadCases; i++) { \
_Pragma("unroll") \
for (int f = 0; f < filtersPerThread; f++) { \
_Pragma("unroll") \
for (int c = 0; c < 3; ++c) { \
prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \
} \
} \
} \
}
#define WA_3_FIDX(y) (((loadY + (y)*B_X*B_Y/preloadCases) % filtersPerThread) * B_X + (loadY + (y)*B_X*B_Y/preloadCases) / filtersPerThread)
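/*
* Notes on the helper macros above, derived from how the preload kernels below use them:
* - WA_C3_LOOP(pp, c) accumulates, over preloadCases cases, the partial products of one
*   color channel c for the pixelCache pixels starting at pixel offset pp, reading
*   shHidActs rows in the filter-major order threadIdx.x * filtersPerThread + f.
* - WA_C3_LOOP2(pp) performs the same accumulation with the three color channels in the
*   innermost loop, so it is only valid for numColors == 3.
* - WA_3_FIDX(y) maps a shared-memory row index to the filter whose hidden activations
*   are stored in that row, matching the interleaved layout the two loops expect.
* The suffix of each preload kernel below (e.g. pc_2_pt_2_f_4_r_32_c_3) appears to encode
* the specialization it is tuned for: pixelCache, pixelsPerThread, filtersPerThread,
* preloadCases and numColors, in that order.
*/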
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module chunk of sumWidth x sumWidth modules
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModuleChunksY*numModuleChunksX, numColors, filterPixels, numFilters),
*          where numModuleChunksX = DIVUP(numModulesX, sumWidth) and likewise for Y
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* sumWidth determines the module chunk each block accumulates over (edge chunks may be smaller)
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
//__launch_bounds__(256,2)
__global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
const int imgOffset = loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX;
// images += loadX;
// hidActs += blockFilterIdx * numImages * numModules
// + loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
const bool doWork = mStartY < mEndY && mStartX < mEndX;
// if (!doWork) {
// hidActs -=
// }
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12]
float haPreload[filtersPerThread * preloadCases / B_Y]; // [8]
// if (blockIdx.x != 0 || blockIdx.y !=0) {
// return;
// }
// printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY);
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
// __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8]
int m = mStartY * numModulesX + mStartX;
int fidx[filtersPerThread * preloadCases / B_Y];
if (doWork) {
#pragma unroll
for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) {
const int fIdx = WA_3_FIDX(y);
// if (doWork) {
haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fIdx * numImages * numModules + m * numImages);
// }
fidx[y] = fIdx * numImages * numModules;
}
}
for (int my = mStartY; my < mEndY; my++) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
m = my * numModulesX + mx;
// __syncthreads();
const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = (imgLoadModPosY + fYOff);
const int pxX = (imgLoadModPosX + fXOff);
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
int myNext = my, mxNext = mx, mNext = m;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
mNext = myNext * numModulesX + mxNext;
}
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
const bool lastBatch = caseIdx + preloadCases == numImages;
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (lastBatch) {
// ha = &hidActs[mNext * numImages];
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)];
}
/* ==================================================================================
* Iteration 0
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels) {
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx);
}
}
}
}
__syncthreads();
haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]);
haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]);
WA_C3_LOOP(0,0);
haPreload[2] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[2]);
haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]);
WA_C3_LOOP(0,1);
haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]);
haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]);
WA_C3_LOOP(0,2);
haPreload[6] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[6]);
haPreload[7] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[7]);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
// if (threadIdx.x == 3)
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
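/*
* Illustrative sketch of how the hipTextureObject_t arguments consumed above via
* tex1Dfetch<float> could be built: a 1D texture bound to the linear images /
* hidActs buffers. This is an assumption about the host-side setup, not code taken
* from this file's launchers; waMakeLinearFloatTextureSketch is a hypothetical name
* and error checking is omitted.
*/
static inline hipTextureObject_t waMakeLinearFloatTextureSketch(float* devPtr, size_t numFloats) {
    hipResourceDesc resDesc = {};
    resDesc.resType = hipResourceTypeLinear;
    resDesc.res.linear.devPtr = devPtr;
    resDesc.res.linear.desc = hipCreateChannelDesc<float>();
    resDesc.res.linear.sizeInBytes = numFloats * sizeof(float);
    hipTextureDesc texDesc = {};
    texDesc.readMode = hipReadModeElementType; // raw float reads, no filtering/normalization
    hipTextureObject_t tex = 0;
    hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex;
}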
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module chunk of sumWidth x sumWidth modules
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModuleChunksY*numModuleChunksX, numColors, filterPixels, numFilters),
*          where numModuleChunksX = DIVUP(numModulesX, sumWidth) and likewise for Y
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* sumWidth determines the module chunk each block accumulates over (edge chunks may be smaller)
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
//__launch_bounds__(256,2)
__global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
const int imgOffset = loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX;
// images += loadX;
// hidActs += blockFilterIdx * numImages * numModules
// + loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
const bool doWork = mStartY < mEndY && mStartX < mEndX;
// if (!doWork) {
// hidActs -=
// }
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12]
float haPreload[filtersPerThread * preloadCases / B_Y]; // [8]
// if (blockIdx.x != 0 || blockIdx.y !=0) {
// return;
// }
// printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY);
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
// __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8]
int m = mStartY * numModulesX + mStartX;
int fidx[filtersPerThread * preloadCases / B_Y];
if (doWork) {
#pragma unroll
for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) {
const int fIdx = WA_3_FIDX(y);
// if (doWork) {
haPreload[y] = hidActs[hidActsOffset + fIdx * numImages * numModules + m * numImages];
// }
fidx[y] = fIdx * numImages * numModules;
}
}
for (int my = mStartY; my < mEndY; my++) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
m = my * numModulesX + mx;
// __syncthreads();
const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = (imgLoadModPosY + fYOff);
const int pxX = (imgLoadModPosX + fXOff);
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
int myNext = my, mxNext = mx, mNext = m;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
mNext = myNext * numModulesX + mxNext;
}
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
const bool lastBatch = caseIdx + preloadCases == numImages;
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (lastBatch) {
// ha = &hidActs[mNext * numImages];
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)];
}
/* ==================================================================================
* Iteration 0
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels) {
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx];
}
}
}
}
__syncthreads();
haPreload[0] = hidActs[hidActsOffset2 + fidx[0]];
haPreload[1] = hidActs[hidActsOffset2 + fidx[1]];
WA_C3_LOOP(0,0);
haPreload[2] = hidActs[hidActsOffset2 + fidx[2]];
haPreload[3] = hidActs[hidActsOffset2 + fidx[3]];
WA_C3_LOOP(0,1);
haPreload[4] = hidActs[hidActsOffset2 + fidx[4]];
haPreload[5] = hidActs[hidActsOffset2 + fidx[5]];
WA_C3_LOOP(0,2);
haPreload[6] = hidActs[hidActsOffset2 + fidx[6]];
haPreload[7] = hidActs[hidActsOffset2 + fidx[7]];
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
// if (threadIdx.x == 3)
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
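/*
* The kernel above is the plain-pointer counterpart of
* conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex: the control flow and the
* preload/double-buffering pattern are identical, only the tex1Dfetch<float> reads
* are replaced by direct global-memory loads through images and hidActs.
*/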
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module chunk of sumWidth x sumWidth modules
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModuleChunksY*numModuleChunksX, numColors, filterPixels, numFilters),
*          where numModuleChunksX = DIVUP(numModulesX, sumWidth) and likewise for Y
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* sumWidth determines the module chunk each block accumulates over (edge chunks may be smaller)
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__launch_bounds__(256,2)
__global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
const int imgOffset = loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadX;
// images += loadX;
// hidActs += blockFilterIdx * numImages * numModules
// + loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
const bool doWork = mStartY < mEndY && mStartX < mEndX;
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12]
float haPreload[filtersPerThread * preloadCases / B_Y]; // [6]
// if (blockIdx.x != 0 || blockIdx.y !=0) {
// return;
// }
// printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY);
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
// __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6]
int m = mStartY * numModulesX + mStartX;
int fidx[filtersPerThread * preloadCases / B_Y];
// if (doWork) {
#pragma unroll
for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) {
fidx[y] = WA_3_FIDX(y) * numImages * numModules;
if (doWork) { // Not actually necessary, I think
haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fidx[y] + m * numImages);
}
}
// }
int mNext = mStartY * numModulesX + mStartX;
for (int my = mStartY; my < mEndY; my++) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
m = mNext;//my * numModulesX + mx;
// __syncthreads();
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = (imgLoadModPosY + fYOff);
const int pxX = (imgLoadModPosX + fXOff);
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1));
// if (!lastModule) {
// const int mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
// const int myNext = my + (mx + 1 == mEndX);
// mNext = myNext * numModulesX + mxNext;
// }
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
const bool lastBatch = caseIdx + preloadCases == numImages;
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages;
const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages;
// if (lastBatch) {
// ha = &hidActs[mNext * numImages];
// }
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)];
}
/* ==================================================================================
* Iteration 0
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx);
}
}
}
}
__syncthreads();
haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]);
haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]);
haPreload[2] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[2]);
haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]);
haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]);
haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]);
WA_C3_LOOP2(0);
__syncthreads();
/* ==================================================================================
* Iteration 1
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
// const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx);
}
}
}
}
__syncthreads();
WA_C3_LOOP2(2);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module chunk of sumWidth x sumWidth modules
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModuleChunksY*numModuleChunksX, numColors, filterPixels, numFilters),
*          where numModuleChunksX = DIVUP(numModulesX, sumWidth) and likewise for Y
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* sumWidth determines the module chunk each block accumulates over (edge chunks may be smaller)
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__launch_bounds__(256,2)
__global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
const int imgOffset = loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadX;
// images += loadX;
// hidActs += blockFilterIdx * numImages * numModules
// + loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
const bool doWork = mStartY < mEndY && mStartX < mEndX;
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12]
float haPreload[filtersPerThread * preloadCases / B_Y]; // [6]
// if (blockIdx.x != 0 || blockIdx.y !=0) {
// return;
// }
// printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY);
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
// __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6]
int m = mStartY * numModulesX + mStartX;
int fidx[filtersPerThread * preloadCases / B_Y];
// if (doWork) {
#pragma unroll
for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) {
fidx[y] = WA_3_FIDX(y) * numImages * numModules;
if (doWork) { // Not actually necessary, I think
haPreload[y] = hidActs[hidActsOffset + fidx[y] + m * numImages];
}
}
// }
int mNext = mStartY * numModulesX + mStartX;
for (int my = mStartY; my < mEndY; my++) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
m = mNext;//my * numModulesX + mx;
// __syncthreads();
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = (imgLoadModPosY + fYOff);
const int pxX = (imgLoadModPosX + fXOff);
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1));
// if (!lastModule) {
// const int mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
// const int myNext = my + (mx + 1 == mEndX);
// mNext = myNext * numModulesX + mxNext;
// }
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
const bool lastBatch = caseIdx + preloadCases == numImages;
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages;
const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages;
// if (lastBatch) {
// ha = &hidActs[mNext * numImages];
// }
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)];
}
/* ==================================================================================
* Iteration 0
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx];
}
}
}
}
__syncthreads();
haPreload[0] = hidActs[hidActsOffset2 + fidx[0]];
haPreload[1] = hidActs[hidActsOffset2 + fidx[1]];
haPreload[2] = hidActs[hidActsOffset2 + fidx[2]];
haPreload[3] = hidActs[hidActsOffset2 + fidx[3]];
haPreload[4] = hidActs[hidActsOffset2 + fidx[4]];
haPreload[5] = hidActs[hidActsOffset2 + fidx[5]];
WA_C3_LOOP2(0);
__syncthreads();
/* ==================================================================================
* Iteration 1
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
// const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx];
}
}
}
}
__syncthreads();
WA_C3_LOOP2(2);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
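/*
 * Equivalently (illustrative): element (c, y, x, i) of images lives at
 * c * imgPixels * imgStride + (y * imgSizeX + x) * imgStride + i, and element
 * (f, my, mx, i) of hidActs lives at
 * ((f * numModulesY + my) * numModulesX + mx) * numImages + i, which is the
 * arithmetic the imgOffset / hidActsOffset computations below rely on.
 */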
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(128, 4)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
// if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [8]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [8]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels.
// All other ways cause crazy excessive register usage.
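        // Multiplying by the (mStartY < mEndY && mStartX < mEndX) predicate collapses the
        // index to 0 when this block has no modules to process, so the preload still reads
        // an in-bounds dummy value instead of taking a branch.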
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx);
imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + idx);
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Almost certainly not necessary here.
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages);
haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + idx);
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
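                // (If this was the last preload batch of the current module, the offsets
                // above now point at the first cases of the next module, so the preloads
                // below pipeline straight into the next module iteration.)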
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
__syncthreads();
#pragma unroll
for (int z = 0; z < 8; ++z) {
WA_IMLOAD_TX(z);
WA_LOOP2(z);
}
#pragma unroll
for (int z = 0; z < 8; ++z) {
WA_HALOAD_TX(z);
WA_LOOP2(z+8);
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(128, 4)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
// if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [8]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [8]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels.
// All other ways cause crazy excessive register usage.
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx);
imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + idx];
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Almost certainly not necessary here.
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages);
haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + idx];
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
__syncthreads();
#pragma unroll
for (int z = 0; z < 8; ++z) {
WA_IMLOAD_2(z);
WA_LOOP2(z);
}
#pragma unroll
for (int z = 0; z < 8; ++z) {
WA_HALOAD_2(z);
WA_LOOP2(z+8);
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(256, 2)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [6]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [16]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
if (doWork) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx);
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages);
}
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
__syncthreads();
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
WA_LOOP(0);
WA_LOOP(1);
WA_LOOP(2);
WA_LOOP(3);
WA_LOOP(4);
WA_LOOP(5);
WA_IMLOAD_TX(0);
WA_LOOP(6);
WA_IMLOAD_TX(1);
WA_LOOP(7);
WA_IMLOAD_TX(2);
WA_LOOP(8);
WA_IMLOAD_TX(3);
WA_LOOP(9);
WA_IMLOAD_TX(4);
WA_LOOP(10);
WA_IMLOAD_TX(5);
WA_LOOP(11);
WA_HALOAD_TX(0);
WA_LOOP(12);
WA_HALOAD_TX(1);
WA_LOOP(13);
WA_HALOAD_TX(2);
WA_LOOP(14);
WA_HALOAD_TX(3);
WA_LOOP(15);
WA_HALOAD_TX(4);
WA_LOOP(16);
WA_HALOAD_TX(5);
WA_LOOP(17);
WA_HALOAD_TX(6);
WA_LOOP(18);
WA_HALOAD_TX(7);
WA_LOOP(19);
WA_HALOAD_TX(8);
WA_LOOP(20);
WA_HALOAD_TX(9);
WA_LOOP(21);
WA_HALOAD_TX(10);
WA_LOOP(22);
WA_HALOAD_TX(11);
WA_LOOP(23);
WA_HALOAD_TX(12);
WA_LOOP(24);
WA_HALOAD_TX(13);
WA_LOOP(25);
WA_HALOAD_TX(14);
WA_LOOP(26);
WA_HALOAD_TX(15);
WA_LOOP(27);
WA_LOOP(28);
WA_LOOP(29);
WA_LOOP(30);
WA_LOOP(31);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(256, 2)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [6]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [16]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
if (doWork) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + y * imgPixels * imgStride + pixIdx];
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + y * numImages * numModules + m * numImages];
}
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
__syncthreads();
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
WA_LOOP(0);
WA_LOOP(1);
WA_LOOP(2);
WA_LOOP(3);
WA_LOOP(4);
WA_LOOP(5);
WA_IMLOAD_2(0);
WA_LOOP(6);
WA_IMLOAD_2(1);
WA_LOOP(7);
WA_IMLOAD_2(2);
WA_LOOP(8);
WA_IMLOAD_2(3);
WA_LOOP(9);
WA_IMLOAD_2(4);
WA_LOOP(10);
WA_IMLOAD_2(5);
WA_LOOP(11);
WA_HALOAD_2(0);
WA_LOOP(12);
WA_HALOAD_2(1);
WA_LOOP(13);
WA_HALOAD_2(2);
WA_LOOP(14);
WA_HALOAD_2(3);
WA_LOOP(15);
WA_HALOAD_2(4);
WA_LOOP(16);
WA_HALOAD_2(5);
WA_LOOP(17);
WA_HALOAD_2(6);
WA_LOOP(18);
WA_HALOAD_2(7);
WA_LOOP(19);
WA_HALOAD_2(8);
WA_LOOP(20);
WA_HALOAD_2(9);
WA_LOOP(21);
WA_HALOAD_2(10);
WA_LOOP(22);
WA_HALOAD_2(11);
WA_LOOP(23);
WA_HALOAD_2(12);
WA_LOOP(24);
WA_HALOAD_2(13);
WA_LOOP(25);
WA_HALOAD_2(14);
WA_LOOP(26);
WA_HALOAD_2(15);
WA_LOOP(27);
WA_LOOP(28);
WA_LOOP(29);
WA_LOOP(30);
WA_LOOP(31);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(256, 2)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex(hipTextureObject_t images, hipTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
// if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [4]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [8]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
if (doWork && loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx);
}
}
if (doWork && loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages);
}
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
// const float* im = &images[caseIdx + preloadCases + pixIdx];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
// im = &images[pixIdxNext];
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
// ha = &hidActs[mNext * numImages];
}
if (loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
}
if (loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
}
__syncthreads();
WA_LOOP(0);
WA_IMLOAD_TX(0);
WA_LOOP(1);
WA_IMLOAD_TX(1);
WA_LOOP(2);
WA_IMLOAD_TX(2);
WA_LOOP(3);
WA_IMLOAD_TX(3);
WA_LOOP(4);
WA_HALOAD_TX(0);
WA_LOOP(5);
WA_HALOAD_TX(1);
WA_LOOP(6);
WA_HALOAD_TX(2);
WA_LOOP(7);
WA_HALOAD_TX(3);
WA_LOOP(8);
WA_HALOAD_TX(4);
WA_LOOP(9);
WA_HALOAD_TX(5);
WA_LOOP(10);
WA_HALOAD_TX(6);
WA_LOOP(11);
WA_HALOAD_TX(7);
WA_LOOP(12);
WA_LOOP(13);
WA_LOOP(14);
WA_LOOP(15);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(256, 2)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
// if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [4]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [8]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
if (doWork && loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + y * imgPixels * imgStride + pixIdx];
}
}
if (doWork && loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + y * numImages * numModules + m * numImages];
}
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
// const float* im = &images[caseIdx + preloadCases + pixIdx];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
// im = &images[pixIdxNext];
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
// ha = &hidActs[mNext * numImages];
}
if (loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
}
if (loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
}
__syncthreads();
WA_LOOP(0);
WA_IMLOAD_2(0);
WA_LOOP(1);
WA_IMLOAD_2(1);
WA_LOOP(2);
WA_IMLOAD_2(2);
WA_LOOP(3);
WA_IMLOAD_2(3);
WA_LOOP(4);
WA_HALOAD_2(0);
WA_LOOP(5);
WA_HALOAD_2(1);
WA_LOOP(6);
WA_HALOAD_2(2);
WA_LOOP(7);
WA_HALOAD_2(3);
WA_LOOP(8);
WA_HALOAD_2(4);
WA_LOOP(9);
WA_HALOAD_2(5);
WA_LOOP(10);
WA_HALOAD_2(6);
WA_LOOP(11);
WA_HALOAD_2(7);
WA_LOOP(12);
WA_LOOP(13);
WA_LOOP(14);
WA_LOOP(15);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
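/*
 * Illustrative sketch only: a hypothetical host-side helper (not part of the original API)
 * mirroring the output-chunk arithmetic used by _weightActs below. For example, with
 * numModulesY = numModulesX = 12 and sumWidth = 3 there are DIVUP(12,3)^2 = 16 module
 * chunks, so the un-scaled path resizes targets to
 * (16 * numFilterColors * filterPixels) rows by numFilters columns.
 */
static inline int weightActsTargetRows(int numModulesY, int numModulesX, int sumWidth,
                                       int numFilterColors, int filterPixels) {
    const int outputModuleChunksX = DIVUP(numModulesX, sumWidth);
    const int outputModuleChunksY = DIVUP(numModulesY, sumWidth);
    return outputModuleChunksX * outputModuleChunksY * numFilterColors * filterPixels;
}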
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
 * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* TODO: you can get a slight speed boost for local non-convolutional units by writing special
* routines for partialSum = 1. But I dunno if the code duplication is worth it...
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _weightActs(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors,
int numGroups, int sumWidth, float scaleTargets, float scaleOutput) {
int numFilterColors = numImgColors / numGroups;
int imgStride = images->stride[0];
int numImages = images->size[1];
int imgPixels = images->size[0] / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs->size[0] / numModules;
int numFiltersPerGroup = numFilters / numGroups;
THAssert(numImgColors % numGroups == 0);
THAssert(numFilters % (16*numGroups) == 0);
THAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 16 == 0)));
THAssert(numGroups == 1 || numFilterColors % 16 == 0);
THAssert(imgSizeY * imgSizeX == imgPixels);
THAssert(images->size[0] == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
int outputModuleChunksX = DIVUP(numModulesX, sumWidth);
int outputModuleChunksY = DIVUP(numModulesY, sumWidth);
int outputModuleChunks = outputModuleChunksX * outputModuleChunksY;
// partialSum = partialSum == 0 ? numModules : partialSum;
// THAssert(numModules % partialSum == 0);
THAssert(hidActs->size[1] == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
THAssert(paddingStart <= 0);
THAssert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
THAssert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
THAssert(moduleStride <= filterSize);
THAssert(numModules * numFilters == hidActs->size[0]);
THAssert(THCudaTensor_isContiguous(state, hidActs));
THAssert(THCudaTensor_isContiguous(state, targets));
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int pixelsPerThread, filtersPerThread, colorsPerThread;
    // Worth playing with these parameters to find the best values for your problem.
    // These values work relatively well, but they are not optimal for all problems.
if (numFilterColors > 3) {
filtersPerThread = numFiltersPerGroup % 64 == 0 ? 4
: numFiltersPerGroup % 32 == 0 ? 2
: 1;
colorsPerThread = numFilterColors % 64 == 0 ? 8
: numFilterColors % 48 == 0 ? 6
: numFilterColors % 32 == 0 ? 8
: 4;
by = (numFilterColors / colorsPerThread) % 8 == 0 ? 8 : 4;
bx = numFiltersPerGroup % 128 == 0 ? 32 : 16;
preloadCases = filtersPerThread * colorsPerThread < 32 ? 32 : 16;
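        // For example (illustrative): numFilterColors = 64 and numFiltersPerGroup = 128
        // give filtersPerThread = 4, colorsPerThread = 8, by = 8, bx = 32 and
        // preloadCases = 16, which is served by the *_ty_8_tx_32_f_4_c_8_r_16 kernels
        // in the dispatch below.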
blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), numFilterColors / (by*colorsPerThread), filterPixels);
THAssert(numFilterColors % (by*colorsPerThread) == 0);
} else { // This is ugly but it's nice to spell it out clearly
THAssert(numGroups == 1); // Just for sanity
// NOTE: these things are only optimized for colors = 3. I didn't really test other cases.
if (numFilters % 64 == 0) { // TODO: having a separate case for 128 would make things faster, but I probably don't care about 128
filtersPerThread = 4;
pixelsPerThread = 2;
by = 16;
bx = 16;
preloadCases = 32;
} else if (numFilters % 48 == 0) {
filtersPerThread = 3;
pixelsPerThread = 4;
by = 16;
bx = 16;
preloadCases = 32;
} else if (numFilters % 32 == 0) {
filtersPerThread = 2;
pixelsPerThread = 2;
by = 8;
bx = 16;
preloadCases = 16;
} else { // This case is completely untested. It might be really slow. But no time now.
filtersPerThread = 1;
pixelsPerThread = 16;
by = 16;
bx = 16;
preloadCases = 32;
}
blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by*pixelsPerThread));
}
THAssert((by * bx) % preloadCases == 0);
THAssert(numFilters % (bx * filtersPerThread) == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % preloadCases != 0;
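    // When numImages is not a multiple of preloadCases, the checkCaseBounds variants of the
    // kernels must bound-check each preloaded case index.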
bool scale = scaleTargets != 0;
if (!scale) {
THCudaTensor_resize2d(state, targets, outputModuleChunks * numFilterColors*filterPixels, numFilters);
} else {
THAssert(targets->size[0] == outputModuleChunks * numFilterColors*filterPixels);
THAssert(targets->size[1] == numFilters);
}
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors > 3) {
if (numFilterColors % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 64 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 48 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); }
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 16 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors <= 3) {
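            // 1-3 input colors: dispatch to the conv_weight_acts_c_* kernels, which bake the
            // color count in as a template argument (the 3/2/1 before the two boolean flags)
            // and drop the runtime numImgColors/numGroups launch arguments used above.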
if (numFilterColors == 3) {
if (numFiltersPerGroup % 64 == 0) {
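                    // Prefer the texture-object preload kernel only when both tensors fit under
                    // the texture size limit (element count * 4 bytes, i.e. float-sized data);
                    // the texture objects are destroyed right after the launch.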
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 48 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 2) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 1) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
}
}
else if (checkCaseBounds == true) {
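        // checkCaseBounds == true: same color/filter dispatch as above, but the few-color
        // kernels are instantiated with their trailing checkCaseBounds flag set to true and
        // none of the texture-preload fast paths are taken (presumably because numImages is
        // not a multiple of the kernels' image tile here).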
if (numFilterColors > 3) {
if (numFilterColors % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 48 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 16 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 2) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 1) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
}
}
}
else if (scale == true) {
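    // scale == true: the dispatch tree above is repeated with the kernels' scale template
    // flag set to true, so the computed weight gradients are presumably blended into the
    // existing targets (roughly targets = scaleTargets * targets + scaleOutput * grad)
    // rather than overwriting them.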
if (checkCaseBounds == false) {
if (numFilterColors > 3) {
if (numFilterColors % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
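                    // The preload kernel names appear to encode their template arguments
                    // (ty/tx = thread-block dims, f = filters per thread, c = colors per
                    // thread, r = preloaded cases), so <8, 32, 4, 8, 16, true> below pairs
                    // with _ty_8_tx_32_f_4_c_8_r_16.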
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 64 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 48 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 32 == 0) {
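                // Color counts that are only multiples of 32 (or 16, below) have no
                // texture-preload specializations; they always go through the generic
                // conv_weight_acts_mc_mf_kepler_sw kernel.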
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 16 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFiltersPerGroup % 64 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 48 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
hipTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(hipDestroyTextureObject(texImages));
checkCudaErrors(hipDestroyTextureObject(texHidActs));
} else {
hipFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 2) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 1) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
}
}
else if (checkCaseBounds == true) {
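        // scale == true with checkCaseBounds == true: mirrors the bounds-checked dispatch
        // above, now with the scale flag set to true; again only the generic _sw kernels and
        // the bounds-checked few-color kernels are used.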
if (numFilterColors > 3) {
if (numFilterColors % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 48 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 16 == 0) {
if (numFiltersPerGroup % 128 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 2) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 1) {
if (numFiltersPerGroup % 64 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
}
}
}
getLastCudaError("weightActs: kernel execution failed");
}
void convWeightActs(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum) {
_weightActs(state, images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1);
}
void convWeightActsSt(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
_weightActs(state, images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActs(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_weightActs(state, images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1);
}
void localWeightActsSt(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput) {
_weightActs(state, images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1,
scaleTargets, scaleOutput);
}
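/*
 * Illustrative call sketch for the wrappers above (a name like `weightGrads` is just a
 * placeholder for the caller's targets tensor, not part of this API):
 *
 *   convWeightActs(state, images, hidActs, weightGrads,
 *                  imgSizeY, numModulesY, numModulesX, filterSize,
 *                  paddingStart, moduleStride, numImgColors, numGroups, partialSum);
 *
 * The *St variants additionally take scaleTargets/scaleOutput, so the computed weight
 * gradients can be accumulated into an existing buffer instead of overwriting it; the
 * local* variants call _weightActs with partialSum fixed to 1.
 */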
| d5b86fa2e6d6da2d253a33b691566a9c2297ee59.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../include/cudaconv2.cuh"
#include "../../nvmatrix/include/nvmatrix_kernels.cuh"
#define LO16(x) ((x) & 0x0000FFFF)
#define HI16(x) ((x) >> 16)
#define WA_LOOP(r) _Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
_Pragma("unroll") \
for (int f = 0; f < filtersPerThread; f++) { \
prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \
} \
}
#define WA_LOOP2(r) _Pragma("unroll") \
for (int f = 0; f < filtersPerThread; f++) { \
_Pragma("unroll") \
for (int c = 0; c < colorsPerThread; c++) { \
prod[f][c] += shImages[threadIdx.y + c * B_Y][(r)] * shHidActs[threadIdx.x + f * B_X][(r)]; \
} \
}
#define WA_IMLOAD(r) imPreload[r] = im[(r) * B_X * B_Y / preloadCases * imgPixels * imgStride];
#define WA_IMLOAD_2(r) imPreload[r] = images[imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride];
#define WA_IMLOAD_TX(r) imPreload[r] = tex1Dfetch<float>(images, imgOffset2 + (r) * B_X * B_Y / preloadCases * imgPixels * imgStride);
#define WA_HALOAD(r) haPreload[r] = ha[(r) * B_X * B_Y / preloadCases * numImages * numModules];
#define WA_HALOAD_2(r) haPreload[r] = hidActs[hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules];
#define WA_HALOAD_TX(r) haPreload[r] = tex1Dfetch<float>(hidActs, hidActsOffset2 + (r) * B_X * B_Y / preloadCases * numImages * numModules);
__device__ __forceinline__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
const int my, const int mx, const int paddingStart, const int numModulesX, const int moduleStride,
const int blockPixelY, const int blockPixelX, const int imgSizeX,
const int imgStride, int& pixIdx, int& m) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image
const int pxX = imgLoadModPosX + blockPixelX;
pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
m = my * numModulesX + mx;
}
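/*
 * Worked example of the coordinate mapping above (numbers are illustrative only):
 * with paddingStart = -1, moduleStride = 1, imgSizeX = 32, imgStride = 128,
 * numModulesX = 32, my = 3, mx = 5, blockPixelY = 2, blockPixelX = 0:
 *   imgLoadModPosY = -1 + 3*1 = 2,  pxY = 2 + 2 = 4
 *   imgLoadModPosX = -1 + 5*1 = 4,  pxX = 4 + 0 = 4
 *   pixIdx = (4*32 + 4) * 128 = 16896,  m = 3*32 + 5 = 101
 * i.e. filter pixel (2,0) of module (3,5) reads image pixel (4,4), and that module's
 * hidActs live at index 101 of the numModulesY*numModulesX module dimension.
 */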
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_c_kepler(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / (B_X*filtersPerThread);
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
images += loadX;
hidActs += blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
__shared__ int pxIdxes[B_Y*pixelsPerThread];
//__shared__ bool isPxInImage[B_Y*pixelsPerThread];
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
__syncthreads();
if (tidx < B_Y * pixelsPerThread) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
int pxY = (imgLoadModPosY + (blockPixelOffset + tidx) / filterSize);
int pxX = (imgLoadModPosX + (blockPixelOffset + tidx) % filterSize);
int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
//isPxInImage[tidx] = ;
}
__syncthreads();
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X*filtersPerThread) {
shHidActs[loadY+y][loadX]= hidActs[caseIdx + y * numImages * numModules + m * numImages];
}
}
}
#pragma unroll
for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) {
//if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things up a bit
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some imgGrads from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0;
}
}
}
}
//}
__syncthreads();
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int p = 0; p < pixelCache; p++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
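/*
 * A minimal sketch (not the library's dispatch): the launch shape implied by the index
 * arithmetic in conv_weight_acts_c_kepler above, assuming numModules % partialSum == 0 and
 * numFilters % (B_X * filtersPerThread) == 0. The concrete template parameters and grid
 * are chosen by the host-side _weightActs code, not here.
 */
template <int B_Y, int B_X, int pixelsPerThread, int filtersPerThread>
static void weightActsCKeplerLaunchShapeSketch(int numModules, int numFilters, int filterPixels,
                                               int partialSum, dim3 &blocks, dim3 &threads) {
    // threadIdx.x indexes the filter, threadIdx.y the filter pixel (see kernel above).
    threads = dim3(B_X, B_Y);
    // blockIdx.x enumerates (module batches of partialSum) x (filter batches of B_X*filtersPerThread);
    // blockIdx.y enumerates batches of B_Y*pixelsPerThread filter pixels.
    blocks = dim3((numModules / partialSum) * (numFilters / (B_X * filtersPerThread)),
                  DIVUP(filterPixels, B_Y * pixelsPerThread));
}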
/*
* Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines color
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines color batch of B_Y * colorsPerThread
* blockIdx.z determines pixel in filter
* NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will
* fail for filters >= 256*256. I'm assuming I won't ever use such large filters.
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
* B_X * B_Y must be divisible by preloadCases
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__global__ void conv_weight_acts_mc_mf_kepler(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
hidActs +=
blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
//if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] = 0;
}
}
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image
const int pxX = imgLoadModPosX + blockPixelX;
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
// Checking this condition actually makes things faster ... :/
// So I've removed the !checkCaseBounds flag and just check it all the time.
if (caseIdx + loadX < numImages) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
if (loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) {
shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx];
}
}
}
if (loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages];
}
}
}
} else {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) {
shImgLoad[(y) * preloadCases] = 0;
}
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = 0;
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
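/*
 * Grid mapping implied by the indexing in conv_weight_acts_mc_mf_kepler above (a sketch
 * only; the host-side dispatch picks the concrete template parameters and enforces the
 * divisibility assumed here):
 *   gridDim.x = (numModules / partialSum) * (numFilters / (B_X * filtersPerThread))
 *   gridDim.y = numFilterColors / (B_Y * colorsPerThread)
 *   gridDim.z = filterPixels   // one block per filter pixel, hence the 2^16 limit noted above
 *   blockDim  = (B_X, B_Y)     // threadIdx.x -> filter, threadIdx.y -> color
 */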
/*
* Each block computes weight gradients for 1 pixel, B_Y * colorsPerThread colors and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines color
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines color batch of B_Y * colorsPerThread
* blockIdx.z determines pixel in filter
* NOTE: blockIdx.z is limited to values < 2^16. This means that this routine will
* fail for filters >= 256*256. I'm assuming I won't ever use such large filters.
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
* B_X * B_Y must be divisible by preloadCases
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__global__ void conv_weight_acts_mc_mf_kepler_sw(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
hidActs +=
blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
//if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] = 0;
}
}
/*
* Note: iterating this way is about 1% slower and uses a few more registers than iterating
* over the modules linearly. But it's consistent with the preload routines,
* so I'm using it.
*/
for (int my = mStartY; my < mEndY; my++) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
const int pxY = imgLoadModPosY + blockPixelY; // pixel x,y coords in image
for (int mx = mStartX; mx < mEndX; mx++) {
const int m = my * numModulesX + mx;
const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxX = imgLoadModPosX + blockPixelX;
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
// Checking this condition actually makes things faster ... :/
// So I've removed the !checkCaseBounds flag and just check it all the time.
if (caseIdx + loadX < numImages) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
if (loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) {
shImgLoad[(y) * preloadCases] = images[caseIdx + y * imgPixels * imgStride + pixIdx];
}
}
}
if (loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules + m * numImages];
}
}
}
} else {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y*colorsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y*colorsPerThread) {
shImgLoad[(y) * preloadCases] = 0;
}
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = 0;
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
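/*
 * Worked example of the sumWidth chunking used by the *_sw kernels (illustrative numbers):
 * with numModulesX = 14 and sumWidth = 4, numModuleChunksX = DIVUP(14, 4) = 4, so the
 * chunks cover module columns [0,4), [4,8), [8,12), [12,14); targets then holds one
 * partial weight gradient slab per (module chunk, filter color, filter pixel, filter).
 * The mStartX/mEndX (and Y) clamps above additionally skip modules whose receptive field
 * does not contain this block's filter pixel, so no out-of-image pixels are read.
 */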
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_c_kepler_sw(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
images += loadX;
hidActs += blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
+ loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
for (int my = mStartY; my < mEndY; my++) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
const int m = my * numModulesX + mx;
__syncthreads();
const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
int pxY = (imgLoadModPosY + fYOff);
int pxX = (imgLoadModPosX + fXOff);
int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (/*loadY < B_X*filtersPerThread &&*/ (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread;
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) {
shHidActs[loadY+y][loadX]= hidActs[caseIdx + fIdx * numImages * numModules + m * numImages];
}
}
} else {
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// const int fIdx = ((loadY + y) % filtersPerThread) * B_X + (loadY + y) / filtersPerThread;
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X*filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || loadY+y < B_X*filtersPerThread) {
shHidActs[loadY+y][loadX] = 0;
}
}
}
#pragma unroll
for (int pp = 0; pp < pixelsPerThread; pp += pixelCache) {
//if (loadY < B_Y * pixelCache) { // This condition is not necessary for correctness, but it speeds things up a bit
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some imgGrads from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = pp * B_Y + loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX]= 0;
}
}
}
}
//}
__syncthreads();
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int p = 0; p < pixelCache; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][pp + p][f] += shImages[threadIdx.y + p * B_Y + c * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i];
}
}
}
}
__syncthreads();
}
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
#define WA_C3_LOOP(pp, c) _Pragma("unroll") \
for (int i = 0; i < preloadCases; i++) { \
_Pragma("unroll") \
for (int p = 0; p < pixelCache; p++) { \
_Pragma("unroll") \
for (int f = 0; f < filtersPerThread; f++) { \
prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \
} \
} \
}
#define WA_C3_LOOP2(pp) _Pragma("unroll") \
for (int p = 0; p < pixelCache; p++) { \
_Pragma("unroll") \
for (int i = 0; i < preloadCases; i++) { \
_Pragma("unroll") \
for (int f = 0; f < filtersPerThread; f++) { \
_Pragma("unroll") \
for (int c = 0; c < 3; ++c) { \
prod[c][(pp) + p][f] += shImages[threadIdx.y + p * B_Y + (c) * pixelCache * B_Y][i] * shHidActs[threadIdx.x * filtersPerThread + f][i]; \
} \
} \
} \
}
#define WA_3_FIDX(y) (((loadY + (y)*B_X*B_Y/preloadCases) % filtersPerThread) * B_X + (loadY + (y)*B_X*B_Y/preloadCases) / filtersPerThread)
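/*
 * WA_3_FIDX maps preload slot y of a loader thread (shared row loadY) to the filter index
 * whose hidActs it fetches. The permutation is chosen so that, after the shared-memory
 * store loop in the kernels below, row (threadIdx.x * filtersPerThread + f) of shHidActs
 * holds the activations of filter (f * B_X + threadIdx.x) of this block's filter group,
 * which is exactly the filter that thread writes to targets[... + f * B_X]. (The concrete
 * strides assume the B_Y = B_X = 16, preloadCases = 32, filtersPerThread = 4
 * instantiations used for these preload kernels.)
 */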
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
//__launch_bounds__(256,2)
__global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
const int imgOffset = loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX;
// images += loadX;
// hidActs += blockFilterIdx * numImages * numModules
// + loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
const bool doWork = mStartY < mEndY && mStartX < mEndX;
// if (!doWork) {
// hidActs -=
// }
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12]
float haPreload[filtersPerThread * preloadCases / B_Y]; // [8]
// if (blockIdx.x != 0 || blockIdx.y !=0) {
// return;
// }
// printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY);
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
// __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8]
int m = mStartY * numModulesX + mStartX;
int fidx[filtersPerThread * preloadCases / B_Y];
if (doWork) {
#pragma unroll
for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) {
const int fIdx = WA_3_FIDX(y);
// if (doWork) {
haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fIdx * numImages * numModules + m * numImages);
// }
fidx[y] = fIdx * numImages * numModules;
}
}
for (int my = mStartY; my < mEndY; my++) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
m = my * numModulesX + mx;
// __syncthreads();
const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = (imgLoadModPosY + fYOff);
const int pxX = (imgLoadModPosX + fXOff);
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
int myNext = my, mxNext = mx, mNext = m;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
mNext = myNext * numModulesX + mxNext;
}
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
const bool lastBatch = caseIdx + preloadCases == numImages;
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (lastBatch) {
// ha = &hidActs[mNext * numImages];
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)];
}
/* ==================================================================================
* Iteration 0
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels) {
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx);
}
}
}
}
__syncthreads();
haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]);
haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]);
WA_C3_LOOP(0,0);
haPreload[2] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[2]);
haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]);
WA_C3_LOOP(0,1);
haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]);
haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]);
WA_C3_LOOP(0,2);
haPreload[6] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[6]);
haPreload[7] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[7]);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
// if (threadIdx.x == 3)
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
//__launch_bounds__(256,2)
__global__ void conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
const int imgOffset = loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules + loadX;
// images += loadX;
// hidActs += blockFilterIdx * numImages * numModules
// + loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
const bool doWork = mStartY < mEndY && mStartX < mEndX;
// if (!doWork) {
// hidActs -=
// }
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12]
float haPreload[filtersPerThread * preloadCases / B_Y]; // [8]
// if (blockIdx.x != 0 || blockIdx.y !=0) {
// return;
// }
// printf("mStartX: %d, mStartX: %d, mStartX: %d, mStartX: %d\n", mStartX, mStartY, mEndX, mEndY);
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
// __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [8]
int m = mStartY * numModulesX + mStartX;
int fidx[filtersPerThread * preloadCases / B_Y];
if (doWork) {
#pragma unroll
for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) {
const int fIdx = WA_3_FIDX(y);
// if (doWork) {
haPreload[y] = hidActs[hidActsOffset + fIdx * numImages * numModules + m * numImages];
// }
fidx[y] = fIdx * numImages * numModules;
}
}
for (int my = mStartY; my < mEndY; my++) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
m = my * numModulesX + mx;
// __syncthreads();
const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = (imgLoadModPosY + fYOff);
const int pxX = (imgLoadModPosX + fXOff);
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
int myNext = my, mxNext = mx, mNext = m;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
mNext = myNext * numModulesX + mxNext;
}
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
const bool lastBatch = caseIdx + preloadCases == numImages;
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (lastBatch) {
// ha = &hidActs[mNext * numImages];
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)];
}
/* ==================================================================================
* Iteration 0
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels) {
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx];
}
}
}
}
__syncthreads();
haPreload[0] = hidActs[hidActsOffset2 + fidx[0]];
haPreload[1] = hidActs[hidActsOffset2 + fidx[1]];
WA_C3_LOOP(0,0);
haPreload[2] = hidActs[hidActsOffset2 + fidx[2]];
haPreload[3] = hidActs[hidActsOffset2 + fidx[3]];
WA_C3_LOOP(0,1);
haPreload[4] = hidActs[hidActsOffset2 + fidx[4]];
haPreload[5] = hidActs[hidActsOffset2 + fidx[5]];
WA_C3_LOOP(0,2);
haPreload[6] = hidActs[hidActsOffset2 + fidx[6]];
haPreload[7] = hidActs[hidActsOffset2 + fidx[7]];
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
// if (threadIdx.x == 3)
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
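/*
 * Illustrative launch-geometry sketch (added commentary, not part of the original
 * dispatch code): the blockIdx decoding in the kernel below implies a host-side
 * configuration roughly like the following. The concrete B_X/B_Y values are an
 * assumption; B_X = B_Y = 16 would give the 256 threads expected by __launch_bounds__(256, 2).
 *
 *   int numFilterBlocks = numFilters / (B_X * filtersPerThread);
 *   int numModuleChunks = DIVUP(numModulesY, sumWidth) * DIVUP(numModulesX, sumWidth);
 *   dim3 threads(B_X, B_Y);
 *   dim3 blocks(numModuleChunks * numFilterBlocks,            // decoded into (module chunk, filter block)
 *               DIVUP(filterPixels, B_Y * pixelsPerThread));  // batch of filter pixels handled per block
 */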
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__launch_bounds__(256,2)
__global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // cache preloadCases cases of pixelCache * B_Y pixels for each of numColors colors
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
const int imgOffset = loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadX;
// images += loadX;
// hidActs += blockFilterIdx * numImages * numModules
// + loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
const bool doWork = mStartY < mEndY && mStartX < mEndX;
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12]
float haPreload[filtersPerThread * preloadCases / B_Y]; // [6]
// if (blockIdx.x != 0 || blockIdx.y !=0) {
// return;
// }
// printf("mStartX: %d, mStartY: %d, mEndX: %d, mEndY: %d\n", mStartX, mStartY, mEndX, mEndY);
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
// __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6]
int m = mStartY * numModulesX + mStartX;
int fidx[filtersPerThread * preloadCases / B_Y];
// if (doWork) {
#pragma unroll
for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) {
fidx[y] = WA_3_FIDX(y) * numImages * numModules;
if (doWork) { // Not actually necessary, I think
haPreload[y] = tex1Dfetch<float>(hidActs, hidActsOffset + fidx[y] + m * numImages);
}
}
// }
int mNext = mStartY * numModulesX + mStartX;
for (int my = mStartY; my < mEndY; my++) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
m = mNext;//my * numModulesX + mx;
// __syncthreads();
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = (imgLoadModPosY + fYOff);
const int pxX = (imgLoadModPosX + fXOff);
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1));
// if (!lastModule) {
// const int mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
// const int myNext = my + (mx + 1 == mEndX);
// mNext = myNext * numModulesX + mxNext;
// }
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
const bool lastBatch = caseIdx + preloadCases == numImages;
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages;
const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages;
// if (lastBatch) {
// ha = &hidActs[mNext * numImages];
// }
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)];
}
/* ==================================================================================
* Iteration 0
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx);
}
}
}
}
__syncthreads();
haPreload[0] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[0]);
haPreload[1] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[1]);
haPreload[2] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[2]);
haPreload[3] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[3]);
haPreload[4] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[4]);
haPreload[5] = tex1Dfetch<float>(hidActs, hidActsOffset2 + fidx[5]);
WA_C3_LOOP2(0);
__syncthreads();
/* ==================================================================================
* Iteration 1
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
// const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = tex1Dfetch<float>(images, imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx);
}
}
}
}
__syncthreads();
WA_C3_LOOP2(2);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X * filtersPerThread
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
* pixelsPerThread must be divisible by pixelCache
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
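/*
 * Worked index decomposition (added commentary): with the targets layout stated
 * above, the flat offset of element (moduleChunk, c, px, f) is
 *
 *   ((moduleChunk * numColors + c) * filterPixels + px) * numFilters + f
 *
 * The kernel below splits this between the base pointer adjustment (module chunk,
 * blockPixelOffset, blockFilterIdx, threadIdx) and the store offset
 * p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X, so that
 * px = blockPixelOffset + p * B_Y + threadIdx.y and the filter index is
 * blockFilterIdx + f * B_X + threadIdx.x.
 */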
template <int B_Y, int B_X, int pixelCache, int pixelsPerThread, int filtersPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__launch_bounds__(256,2)
__global__ void conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelCache * B_Y * numColors][preloadCases]; // cache preloadCases cases of pixelCache * B_Y pixels for each of numColors colors
__shared__ float shHidActs[B_X * filtersPerThread][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X*filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
const int blockFilterIdx = B_X * filtersPerThread* (blockIdx.x % numFilterBlocks);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
const int imgOffset = loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadX;
// images += loadX;
// hidActs += blockFilterIdx * numImages * numModules
// + loadX;
targets += (blockModuleChunkIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
//float* shImgLoad = &shImages[loadY][loadX];
//float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread][filtersPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][p][f] = 0;
}
}
}
const int mStartX = blockModuleStartX;
const int mStartY = blockModuleStartY;
const int mEndX = min(numModulesX, blockModuleStartX + sumWidth);
const int mEndY = min(numModulesY, blockModuleStartY + sumWidth);
const bool doWork = mStartY < mEndY && mStartX < mEndX;
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// float imPreload[pixelCache * numColors * preloadCases / B_X]; // [12]
float haPreload[filtersPerThread * preloadCases / B_Y]; // [6]
// if (blockIdx.x != 0 || blockIdx.y !=0) {
// return;
// }
// printf("mStartX: %d, mStartY: %d, mEndX: %d, mEndY: %d\n", mStartX, mStartY, mEndX, mEndY);
const int fYOff = (blockPixelOffset + tidx) / filterSize;
const int fXOff = (blockPixelOffset + tidx) % filterSize;
__shared__ int pxIdxes[B_Y*pixelsPerThread];
// __shared__ int fidx[filtersPerThread * preloadCases / B_Y]; // [6]
int m = mStartY * numModulesX + mStartX;
int fidx[filtersPerThread * preloadCases / B_Y];
// if (doWork) {
#pragma unroll
for (int y = 0; y < filtersPerThread * preloadCases / B_Y; ++y) {
fidx[y] = WA_3_FIDX(y) * numImages * numModules;
if (doWork) { // Not actually necessary, I think
haPreload[y] = hidActs[hidActsOffset + fidx[y] + m * numImages];
}
}
// }
int mNext = mStartY * numModulesX + mStartX;
for (int my = mStartY; my < mEndY; my++) {
// const int imgLoadModPosY = paddingStart + my * moduleStride;
for (int mx = mStartX; mx < mEndX; mx++) {
m = mNext;//my * numModulesX + mx;
// __syncthreads();
// const int imgLoadModPosX = paddingStart + mx * moduleStride;
if (tidx < B_Y * pixelsPerThread) {
const int imgLoadModPosY = paddingStart + my * moduleStride;
const int imgLoadModPosX = paddingStart + mx * moduleStride;
const int pxY = (imgLoadModPosY + fYOff);
const int pxX = (imgLoadModPosX + fXOff);
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
pxIdxes[tidx] = pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX ? pixIdx : -1;
}
__syncthreads();
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
mNext = lastModule * m + !lastModule * ((my + (mx + 1 == mEndX)) * numModulesX + (mx + 1 == mEndX ? mStartX : mx + 1));
// if (!lastModule) {
// const int mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
// const int myNext = my + (mx + 1 == mEndX);
// mNext = myNext * numModulesX + mxNext;
// }
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
const bool lastBatch = caseIdx + preloadCases == numImages;
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = hidActs + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages;
const int hidActsOffset2 = hidActsOffset + !lastBatch * (caseIdx + preloadCases + m * numImages) + lastBatch * mNext * numImages;
// if (lastBatch) {
// ha = &hidActs[mNext * numImages];
// }
#pragma unroll
for (int y = 0; y < B_X*filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActs[loadY+y][loadX] = haPreload[y*preloadCases/(B_X*B_Y)];
}
/* ==================================================================================
* Iteration 0
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = 0 * B_Y + loadY + y; // pixel idx in filter
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx];
}
}
}
}
__syncthreads();
haPreload[0] = hidActs[hidActsOffset2 + fidx[0]];
haPreload[1] = hidActs[hidActsOffset2 + fidx[1]];
haPreload[2] = hidActs[hidActsOffset2 + fidx[2]];
haPreload[3] = hidActs[hidActsOffset2 + fidx[3]];
haPreload[4] = hidActs[hidActsOffset2 + fidx[4]];
haPreload[5] = hidActs[hidActsOffset2 + fidx[5]];
WA_C3_LOOP2(0);
__syncthreads();
/* ==================================================================================
* Iteration 1
* ==================================================================================
*/
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
// const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = 0;
}
}
}
#pragma unroll
for (int y = 0; y < B_Y * pixelCache; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelCache) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelCache) {
const int pxIdx = 2 * B_Y + loadY + y; // pixel idx in filter
const int pixIdx = pxIdxes[pxIdx];//(pxY * imgSizeX + pxX) * imgStride;
if (pixIdx >= 0 && pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[loadY+y + c * pixelCache * B_Y][loadX] = images[imgOffset + caseIdx + c * imgPixels * imgStride + pixIdx];
}
}
}
}
__syncthreads();
WA_C3_LOOP2(2);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][p][f];
}
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][p][f];
}
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
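/*
 * Layout note (added commentary): the image layout above means element
 * (c, y, x, img) lives at flat offset
 *
 *   (c * imgSizeY * imgSizeX + y * imgSizeX + x) * imgStride + img
 *
 * which is exactly how imgOffset and the per-pixel index are combined in the loads
 * below. imgStride is the per-pixel stride in cases and is normally equal to or
 * larger than numImages, which is what allows padded image batches.
 */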
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(128, 4)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
// if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [8]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [8]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels.
// All other ways cause crazy excessive register usage.
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx);
imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + idx);
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Almost certainly not necessary here.
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages);
haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + idx);
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
__syncthreads();
#pragma unroll
for (int z = 0; z < 8; ++z) {
WA_IMLOAD_TX(z);
WA_LOOP2(z);
}
#pragma unroll
for (int z = 0; z < 8; ++z) {
WA_HALOAD_TX(z);
WA_LOOP2(z+8);
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
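/*
 * Layout note (added commentary): with the hidActs layout above, element
 * (f, my, mx, img) lives at flat offset
 *
 *   f * numModules * numImages + (my * numModulesX + mx) * numImages + img
 *
 * The kernel below keeps m = my * numModulesX + mx and adds m * numImages to a
 * per-filter base offset, which is the same formula split across the loop levels.
 */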
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(128, 4)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
// if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
// if (mStartY == mEndY || mStartX == mEndX) {
// return;
// }
// const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [8]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [8]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
// It's bizarre, but this is the fastest way I've found to get it not to load nonexistent pixels.
// All other ways cause crazy excessive register usage.
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * imgPixels * imgStride + pixIdx);
imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + idx];
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Almost certainly not necessary here.
const int idx = (mStartY < mEndY && mStartX < mEndX) * (0 + y * numImages * numModules + m * numImages);
haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + idx];
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
__syncthreads();
#pragma unroll
for (int z = 0; z < 8; ++z) {
WA_IMLOAD_2(z);
WA_LOOP2(z);
}
#pragma unroll
for (int z = 0; z < 8; ++z) {
WA_HALOAD_2(z);
WA_LOOP2(z+8);
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
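/*
 * Shared-memory budget sketch (added commentary, using the instantiation suggested
 * by the kernel name: B_Y = 8, B_X = 32, filtersPerThread = 4, colorsPerThread = 6,
 * preloadCases = 32; these values are an assumption, the real template arguments are
 * chosen by the dispatch code elsewhere in this file):
 *
 *   shImages : (6 * 8)  * 32       * 4 B =  6144 B
 *   shHidActs: (4 * 32) * (32 + 1) * 4 B = 16896 B   // one padding column per row
 *   total ~22.5 KB per block, so the 2 blocks/SM requested by
 *   __launch_bounds__(256, 2) fit within 48 KB of shared memory.
 */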
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(256, 2)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [6]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [16]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
if (doWork) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx);
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages);
}
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
__syncthreads();
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
WA_LOOP(0);
WA_LOOP(1);
WA_LOOP(2);
WA_LOOP(3);
WA_LOOP(4);
WA_LOOP(5);
WA_IMLOAD_TX(0);
WA_LOOP(6);
WA_IMLOAD_TX(1);
WA_LOOP(7);
WA_IMLOAD_TX(2);
WA_LOOP(8);
WA_IMLOAD_TX(3);
WA_LOOP(9);
WA_IMLOAD_TX(4);
WA_LOOP(10);
WA_IMLOAD_TX(5);
WA_LOOP(11);
WA_HALOAD_TX(0);
WA_LOOP(12);
WA_HALOAD_TX(1);
WA_LOOP(13);
WA_HALOAD_TX(2);
WA_LOOP(14);
WA_HALOAD_TX(3);
WA_LOOP(15);
WA_HALOAD_TX(4);
WA_LOOP(16);
WA_HALOAD_TX(5);
WA_LOOP(17);
WA_HALOAD_TX(6);
WA_LOOP(18);
WA_HALOAD_TX(7);
WA_LOOP(19);
WA_HALOAD_TX(8);
WA_LOOP(20);
WA_HALOAD_TX(9);
WA_LOOP(21);
WA_HALOAD_TX(10);
WA_LOOP(22);
WA_HALOAD_TX(11);
WA_LOOP(23);
WA_HALOAD_TX(12);
WA_LOOP(24);
WA_HALOAD_TX(13);
WA_LOOP(25);
WA_HALOAD_TX(14);
WA_LOOP(26);
WA_HALOAD_TX(15);
WA_LOOP(27);
WA_LOOP(28);
WA_LOOP(29);
WA_LOOP(30);
WA_LOOP(31);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
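/*
 * Module-range clamping example (added commentary, hypothetical numbers): the kernel
 * below restricts [mStartX, mEndX) so this block's filter pixel only visits modules
 * whose receptive field actually contains it. For instance, with paddingStart = -2,
 * moduleStride = 1, blockPixelX = 0, imgSizeX = 32, numModulesX = 32, sumWidth = 4
 * and blockModuleStartX = 0:
 *
 *   mStartX = max(0, DIVUP(-0 - (-2), 1)) = 2                    // modules 0 and 1 would read x < 0
 *   mEndX   = min(32, min(0 + 4, DIVUP(32 - 0 + 2, 1))) = 4
 *
 * so the block iterates mx = 2, 3 only.
 */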
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(256, 2)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [6]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [16]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
if (doWork) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + y * imgPixels * imgStride + pixIdx];
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + y * numImages * numModules + m * numImages];
}
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
__syncthreads();
// const float* im = &images[caseIdx + preloadCases + pixIdx];
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
}
WA_LOOP(0);
WA_LOOP(1);
WA_LOOP(2);
WA_LOOP(3);
WA_LOOP(4);
WA_LOOP(5);
WA_IMLOAD_2(0);
WA_LOOP(6);
WA_IMLOAD_2(1);
WA_LOOP(7);
WA_IMLOAD_2(2);
WA_LOOP(8);
WA_IMLOAD_2(3);
WA_LOOP(9);
WA_IMLOAD_2(4);
WA_LOOP(10);
WA_IMLOAD_2(5);
WA_LOOP(11);
WA_HALOAD_2(0);
WA_LOOP(12);
WA_HALOAD_2(1);
WA_LOOP(13);
WA_HALOAD_2(2);
WA_LOOP(14);
WA_HALOAD_2(3);
WA_LOOP(15);
WA_HALOAD_2(4);
WA_LOOP(16);
WA_HALOAD_2(5);
WA_LOOP(17);
WA_HALOAD_2(6);
WA_LOOP(18);
WA_HALOAD_2(7);
WA_LOOP(19);
WA_HALOAD_2(8);
WA_LOOP(20);
WA_HALOAD_2(9);
WA_LOOP(21);
WA_HALOAD_2(10);
WA_LOOP(22);
WA_HALOAD_2(11);
WA_LOOP(23);
WA_HALOAD_2(12);
WA_LOOP(24);
WA_HALOAD_2(13);
WA_LOOP(25);
WA_HALOAD_2(14);
WA_LOOP(26);
WA_HALOAD_2(15);
WA_LOOP(27);
WA_LOOP(28);
WA_LOOP(29);
WA_LOOP(30);
WA_LOOP(31);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
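/*
 * Illustrative launch-geometry sketch for the many-color kernels (added commentary;
 * the B_X/B_Y values are taken from the kernel name, tx_32 / ty_8, and the shape of
 * this helper is an assumption, the real dispatch code lives elsewhere in this file):
 *
 *   int numFilterBlocks = numFilters / (B_X * filtersPerThread);
 *   int numModuleChunks = DIVUP(numModulesY, sumWidth) * DIVUP(numModulesX, sumWidth);
 *   dim3 threads(B_X, B_Y);                                   // 32 x 8 = 256 threads
 *   dim3 blocks(numModuleChunks * numFilterBlocks,            // (module chunk, filter block)
 *               numFilterColors / (B_Y * colorsPerThread),    // batch of filter colors
 *               filterSize * filterSize);                     // one filter pixel per blockIdx.z
 */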
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(256, 2)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex(cudaTextureObject_t images, cudaTextureObject_t hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
// if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [4]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [8]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
if (doWork && loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
imPreload[y * preloadCases/(B_X * B_Y)] = tex1Dfetch<float>(images, imgOffset + y * imgPixels * imgStride + pixIdx);
}
}
if (doWork && loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
haPreload[y * preloadCases / (B_X * B_Y)] = tex1Dfetch<float>(hidActs, hidActsOffset + y * numImages * numModules + m * numImages);
}
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
// const float* im = &images[caseIdx + preloadCases + pixIdx];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
// im = &images[pixIdxNext];
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
// ha = &hidActs[mNext * numImages];
}
if (loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
}
if (loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
}
__syncthreads();
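            // The WA_* macros (defined earlier in this file) appear to interleave the 16
            // accumulation steps over the shared-memory tile with texture preloads of the
            // next tile (via imgOffset2 / hidActsOffset2 above), hiding fetch latency
            // behind the math.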
WA_LOOP(0);
WA_IMLOAD_TX(0);
WA_LOOP(1);
WA_IMLOAD_TX(1);
WA_LOOP(2);
WA_IMLOAD_TX(2);
WA_LOOP(3);
WA_IMLOAD_TX(3);
WA_LOOP(4);
WA_HALOAD_TX(0);
WA_LOOP(5);
WA_HALOAD_TX(1);
WA_LOOP(6);
WA_HALOAD_TX(2);
WA_LOOP(7);
WA_HALOAD_TX(3);
WA_LOOP(8);
WA_HALOAD_TX(4);
WA_LOOP(9);
WA_HALOAD_TX(5);
WA_LOOP(10);
WA_HALOAD_TX(6);
WA_LOOP(11);
WA_HALOAD_TX(7);
WA_LOOP(12);
WA_LOOP(13);
WA_LOOP(14);
WA_LOOP(15);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*/
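/*
 * Non-texture variant of the kernel above: same structure, but the preloads read
 * images/hidActs through plain global-memory loads instead of tex1Dfetch. For the
 * < 8, 32, 4, 8, 16 > instantiation launched below, each thread preloads
 * preloadCases*colorsPerThread/B_X = 16*8/32 = 4 image values and
 * preloadCases*filtersPerThread/B_Y = 16*4/8 = 8 hidAct values per iteration.
 */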
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale>
__launch_bounds__(256, 2)
__global__ void conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int sumWidth,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int blockModuleChunkIdx = blockIdx.x / numFilterBlocks;
const int numModuleChunksX = DIVUP(numModulesX, sumWidth);
// const int numModuleChunksY = DIVUP(numModulesY, sumWidth);
const int blockModuleChunkX = blockModuleChunkIdx % numModuleChunksX;
const int blockModuleChunkY = blockModuleChunkIdx / numModuleChunksX;
const int blockModuleStartX = blockModuleChunkX * sumWidth;
const int blockModuleStartY = blockModuleChunkY * sumWidth;
// const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = blockIdx.z; // pixel idx in filter
const int blockPixelY = blockPixelOffset / filterSize, blockPixelX = blockPixelOffset % filterSize;
const int blockFilterColorIdx = blockIdx.y * B_Y * colorsPerThread;
const int imgColorIdx = blockFilterColorIdx + blockGroupIdx * numFilterColors;
const int imgOffset = (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
// images += (imgColorIdx + loadY) * imgPixels * imgStride + loadX;
const int hidActsOffset = blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
//
// hidActs +=
// blockFilterIdx * numImages * numModules
// + loadY * numImages * numModules
// + loadX;
targets += blockModuleChunkIdx * numFilters * filterPixels * numFilterColors
+ (blockFilterColorIdx + threadIdx.y) * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.x;
// if (blockIdx.x != 0 || blockIdx.y != 0 || blockIdx.z != 0) return;
const int mStartX = max(blockModuleStartX, DIVUP(-blockPixelX - paddingStart, moduleStride));
const int mStartY = max(blockModuleStartY, DIVUP(-blockPixelY - paddingStart, moduleStride));
const int mEndX = min(numModulesX, min(blockModuleStartX + sumWidth, DIVUP(imgSizeX - blockPixelX - paddingStart, moduleStride)));
const int mEndY = min(numModulesY, min(blockModuleStartY + sumWidth, DIVUP(imgSizeY - blockPixelY - paddingStart, moduleStride)));
const bool doWork = mStartY < mEndY && mStartX < mEndX;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float imPreload[preloadCases*colorsPerThread/B_X]; // [4]
float haPreload[preloadCases*filtersPerThread/B_Y]; // [8]
float prod[filtersPerThread][colorsPerThread];
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
prod[f][c] = 0;
}
}
int pixIdx, pixIdxNext, m, mNext;
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
mStartY, mStartX, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdx, m);
if (doWork && loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
imPreload[y * preloadCases/(B_X * B_Y)] = images[imgOffset + y * imgPixels * imgStride + pixIdx];
}
}
if (doWork && loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
haPreload[y * preloadCases / (B_X * B_Y)] = hidActs[hidActsOffset + y * numImages * numModules + m * numImages];
}
}
for (int my = mStartY; my < mEndY; my++) {
for (int mx = mStartX; mx < mEndX; mx++) {
int myNext = my, mxNext = mx;
const bool lastModule = my == mEndY - 1 && mx == mEndX - 1;
if (!lastModule) {
mxNext = mx + 1 == mEndX ? mStartX : mx + 1;
myNext = my + (mx + 1 == mEndX);
}
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_setCoords(
myNext, mxNext, paddingStart, numModulesX, moduleStride,
blockPixelY, blockPixelX, imgSizeX, imgStride,
pixIdxNext, mNext);
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
// const float* im = &images[caseIdx + preloadCases + pixIdx];
int imgOffset2 = imgOffset + caseIdx + preloadCases + pixIdx;
int hidActsOffset2 = hidActsOffset + caseIdx + preloadCases + m * numImages;
// const float* ha = &hidActs[caseIdx + preloadCases + m * numImages];
if (caseIdx + preloadCases == numImages) {
pixIdx = pixIdxNext;
m = mNext;
// im = &images[pixIdxNext];
imgOffset2 = imgOffset + pixIdxNext;
hidActsOffset2 = hidActsOffset + mNext * numImages;
// ha = &hidActs[mNext * numImages];
}
if (loadY < B_Y * colorsPerThread) {
#pragma unroll
for (int y = 0; y < B_Y * colorsPerThread; y += (B_X * B_Y) / preloadCases) {
shImgLoad[(y) * preloadCases] = imPreload[y * preloadCases / (B_X * B_Y)];
}
}
if (loadY < B_X * filtersPerThread) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
shHidActLoad[y * (preloadCases + 1)] = haPreload[y * preloadCases / (B_X * B_Y)];
}
}
__syncthreads();
WA_LOOP(0);
WA_IMLOAD_2(0);
WA_LOOP(1);
WA_IMLOAD_2(1);
WA_LOOP(2);
WA_IMLOAD_2(2);
WA_LOOP(3);
WA_IMLOAD_2(3);
WA_LOOP(4);
WA_HALOAD_2(0);
WA_LOOP(5);
WA_HALOAD_2(1);
WA_LOOP(6);
WA_HALOAD_2(2);
WA_LOOP(7);
WA_HALOAD_2(3);
WA_LOOP(8);
WA_HALOAD_2(4);
WA_LOOP(9);
WA_HALOAD_2(5);
WA_LOOP(10);
WA_HALOAD_2(6);
WA_LOOP(11);
WA_HALOAD_2(7);
WA_LOOP(12);
WA_LOOP(13);
WA_LOOP(14);
WA_LOOP(15);
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * B_Y * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[f][c];
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[c * B_Y * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[f][c];
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
 * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* TODO: you can get a slight speed boost for local non-convolutional units by writing special
* routines for partialSum = 1. But I dunno if the code duplication is worth it...
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
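/*
 * Illustrative sizing example (made-up numbers): with numModulesY = numModulesX = 12
 * and sumWidth = 4, outputModuleChunks = DIVUP(12,4) * DIVUP(12,4) = 9; with
 * numFilterColors = 64, filterSize = 3 (filterPixels = 9) and numFilters = 128,
 * targets is resized below to (9 * 64 * 9) x 128.
 */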
void _weightActs(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors,
int numGroups, int sumWidth, float scaleTargets, float scaleOutput) {
int numFilterColors = numImgColors / numGroups;
int imgStride = images->stride[0];
int numImages = images->size[1];
int imgPixels = images->size[0] / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs->size[0] / numModules;
int numFiltersPerGroup = numFilters / numGroups;
THAssert(numImgColors % numGroups == 0);
THAssert(numFilters % (16*numGroups) == 0);
THAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 16 == 0)));
THAssert(numGroups == 1 || numFilterColors % 16 == 0);
THAssert(imgSizeY * imgSizeX == imgPixels);
THAssert(images->size[0] == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
int outputModuleChunksX = DIVUP(numModulesX, sumWidth);
int outputModuleChunksY = DIVUP(numModulesY, sumWidth);
int outputModuleChunks = outputModuleChunksX * outputModuleChunksY;
// partialSum = partialSum == 0 ? numModules : partialSum;
// THAssert(numModules % partialSum == 0);
THAssert(hidActs->size[1] == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
THAssert(paddingStart <= 0);
THAssert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
THAssert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
THAssert(moduleStride <= filterSize);
THAssert(numModules * numFilters == hidActs->size[0]);
THAssert(THCudaTensor_isContiguous(state, hidActs));
THAssert(THCudaTensor_isContiguous(state, targets));
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int pixelsPerThread, filtersPerThread, colorsPerThread;
    // Worth playing with these parameters to find the best values for your problem.
    // These values work relatively well, but they are not optimal for all problems.
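    // Example: numFilterColors = 64 with numFiltersPerGroup = 128 selects
    // filtersPerThread = 4, colorsPerThread = 8, by = 8, bx = 32 and preloadCases = 16
    // (since 4*8 is not < 32), i.e. the < 8, 32, 4, 8, 16 > specializations below.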
if (numFilterColors > 3) {
filtersPerThread = numFiltersPerGroup % 64 == 0 ? 4
: numFiltersPerGroup % 32 == 0 ? 2
: 1;
colorsPerThread = numFilterColors % 64 == 0 ? 8
: numFilterColors % 48 == 0 ? 6
: numFilterColors % 32 == 0 ? 8
: 4;
by = (numFilterColors / colorsPerThread) % 8 == 0 ? 8 : 4;
bx = numFiltersPerGroup % 128 == 0 ? 32 : 16;
preloadCases = filtersPerThread * colorsPerThread < 32 ? 32 : 16;
blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), numFilterColors / (by*colorsPerThread), filterPixels);
THAssert(numFilterColors % (by*colorsPerThread) == 0);
} else { // This is ugly but it's nice to spell it out clearly
THAssert(numGroups == 1); // Just for sanity
// NOTE: these things are only optimized for colors = 3. I didn't really test other cases.
if (numFilters % 64 == 0) { // TODO: having a separate case for 128 would make things faster, but I probably don't care about 128
filtersPerThread = 4;
pixelsPerThread = 2;
by = 16;
bx = 16;
preloadCases = 32;
} else if (numFilters % 48 == 0) {
filtersPerThread = 3;
pixelsPerThread = 4;
by = 16;
bx = 16;
preloadCases = 32;
} else if (numFilters % 32 == 0) {
filtersPerThread = 2;
pixelsPerThread = 2;
by = 8;
bx = 16;
preloadCases = 16;
} else { // This case is completely untested. It might be really slow. But no time now.
filtersPerThread = 1;
pixelsPerThread = 16;
by = 16;
bx = 16;
preloadCases = 32;
}
blocks = dim3(outputModuleChunks*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by*pixelsPerThread));
}
THAssert((by * bx) % preloadCases == 0);
THAssert(numFilters % (bx * filtersPerThread) == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % preloadCases != 0;
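    // True when the minibatch is not a multiple of preloadCases; in that case the
    // dispatch below avoids the preload/texture specializations (which assume
    // numImages is a multiple of preloadCases) and uses the generic *_sw kernels.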
bool scale = scaleTargets != 0;
if (!scale) {
THCudaTensor_resize2d(state, targets, outputModuleChunks * numFilterColors*filterPixels, numFilters);
} else {
THAssert(targets->size[0] == outputModuleChunks * numFilterColors*filterPixels);
THAssert(targets->size[1] == numFilters);
}
if (scale == false) {
if (checkCaseBounds == false) {
if (numFilterColors > 3) {
if (numFilterColors % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 64 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 48 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput); }
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 16 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFiltersPerGroup % 64 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, false, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 48 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, false, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 2) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 1) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
}
}
else if (checkCaseBounds == true) {
if (numFilterColors > 3) {
if (numFilterColors % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 48 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 16 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 2) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 1) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
}
}
}
else if (scale == true) {
if (checkCaseBounds == false) {
if (numFilterColors > 3) {
if (numFilterColors % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16_tex< 8, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_8_r_16< 8, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 64 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16_tex< 8, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_16_f_4_c_8_r_16< 8, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 48 == 0) {
if (numFiltersPerGroup % 128 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32_tex< 8, 32, 4, 6, 32, true ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_preload_ty_8_tx_32_f_4_c_6_r_32< 8, 32, 4, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 16 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFiltersPerGroup % 64 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3_tex < 16, 16, 2, 2, 4, 32, 3, true, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_preload_pc_2_pt_2_f_4_r_32_c_3 < 16, 16, 2, 2, 4, 32, 3, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 48 == 0) {
if ((THCudaTensor_nElement(state, images)*4 < TEXTURE_SIZE_MAX) && (THCudaTensor_nElement(state, hidActs)*4 < TEXTURE_SIZE_MAX)) {
cudaTextureObject_t texImages = THCudaTensor_getTextureObject(state, images);
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs);
cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3_tex < 16, 16, 2, 4, 3, 32, 3, true, false ><<<blocks, threads, 0>>>(texImages, texHidActs, THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
checkCudaErrors(cudaDestroyTextureObject(texImages));
checkCudaErrors(cudaDestroyTextureObject(texHidActs));
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_preload_pc_2_pt_4_f_3_r_32_c_3 < 16, 16, 2, 4, 3, 32, 3, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 2) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 1) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, false >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
}
}
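// Note (comment added for clarity): the branch below repeats the same
// numFilterColors / numFiltersPerGroup dispatch as above, but selects the
// kernel instantiations used when checkCaseBounds is true.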
else if (checkCaseBounds == true) {
if (numFilterColors > 3) {
if (numFilterColors % 64 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 48 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 32, 4, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 4, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 2, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 8, 16, 1, 6, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 32 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 8, 16, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 8, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors % 16 == 0) {
if (numFiltersPerGroup % 128 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 32, 4, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 4, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 2, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true >, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_kepler_sw < 4, 16, 1, 4, 32, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, sumWidth, scaleTargets, scaleOutput);
}
}
}
else if (numFilterColors <= 3) {
if (numFilterColors == 3) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 3, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 3, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 3, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 3, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 2) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 2, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 2, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 2, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 2, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
else if (numFilterColors == 1) {
if (numFiltersPerGroup % 64 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 2, 4, 32, 1, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 48 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 4, 3, 32, 1, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 8, 16, 2, 2, 2, 16, 1, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
else if (numFiltersPerGroup % 16 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, true >, cudaFuncCachePreferShared);
conv_weight_acts_c_kepler_sw < 16, 16, 2, 16, 1, 32, 1, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, images), THCudaTensor_data(state, hidActs), THCudaTensor_data(state, targets), numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, sumWidth, scaleTargets, scaleOutput);
}
}
}
}
}
getLastCudaError("weightActs: kernel execution failed");
}
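// Public entry points (comments added for clarity): the plain variants call
// _weightActs with scaleTargets = 0 and scaleOutput = 1, i.e. the targets
// tensor is overwritten; the *St variants expose both scaling factors. The
// local* variants pass partialSum = 1, presumably for locally connected
// (untied) layers. Illustrative call, assuming the tensors are already sized:
//   convWeightActs(state, images, hidActs, targets,
//                  imgSizeY, numModulesY, numModulesX, filterSize,
//                  paddingStart, moduleStride, numImgColors, numGroups,
//                  partialSum);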
void convWeightActs(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum) {
_weightActs(state, images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1);
}
void convWeightActsSt(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
_weightActs(state, images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActs(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_weightActs(state, images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1);
}
void localWeightActsSt(THCState* state, THCudaTensor* images, THCudaTensor* hidActs, THCudaTensor* targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput) {
_weightActs(state, images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1,
scaleTargets, scaleOutput);
}
|
705db291669438be2ab2f92f37b7c7bf1c21f83e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_t1;
int xdim0_update_halo_kernel1_t1_h = -1;
__constant__ int xdim1_update_halo_kernel1_t1;
int xdim1_update_halo_kernel1_t1_h = -1;
__constant__ int xdim2_update_halo_kernel1_t1;
int xdim2_update_halo_kernel1_t1_h = -1;
__constant__ int xdim3_update_halo_kernel1_t1;
int xdim3_update_halo_kernel1_t1_h = -1;
__constant__ int xdim4_update_halo_kernel1_t1;
int xdim4_update_halo_kernel1_t1_h = -1;
__constant__ int xdim5_update_halo_kernel1_t1;
int xdim5_update_halo_kernel1_t1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x, y) (x + xdim0_update_halo_kernel1_t1 * (y))
#define OPS_ACC1(x, y) (x + xdim1_update_halo_kernel1_t1 * (y))
#define OPS_ACC2(x, y) (x + xdim2_update_halo_kernel1_t1 * (y))
#define OPS_ACC3(x, y) (x + xdim3_update_halo_kernel1_t1 * (y))
#define OPS_ACC4(x, y) (x + xdim4_update_halo_kernel1_t1 * (y))
#define OPS_ACC5(x, y) (x + xdim5_update_halo_kernel1_t1 * (y))
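// Illustrative expansion (comment added for clarity): with the macros above,
// OPS_ACC0(0, -1) expands to (0 + xdim0_update_halo_kernel1_t1 * (-1)), i.e.
// the element one x-line below the current point in the flattened 2D dat;
// the halo update below copies that neighbour into the current point for
// every field enabled in fields[].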
// user function
__device__
inline void
update_halo_kernel1_t1_gpu(double *density0, double *energy0,
double *energy1, double *u, double *p,
double *sd, const int *fields) {
if (fields[FIELD_DENSITY] == 1)
density0[OPS_ACC0(0, 0)] = density0[OPS_ACC0(0, -1)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC1(0, 0)] = energy0[OPS_ACC1(0, -1)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC2(0, 0)] = energy1[OPS_ACC2(0, -1)];
if (fields[FIELD_U] == 1)
u[OPS_ACC3(0, 0)] = u[OPS_ACC3(0, -1)];
if (fields[FIELD_P] == 1)
p[OPS_ACC4(0, 0)] = p[OPS_ACC4(0, -1)];
if (fields[FIELD_SD] == 1)
sd[OPS_ACC5(0, 0)] = sd[OPS_ACC5(0, -1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void
ops_update_halo_kernel1_t1(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
const int *__restrict arg6, int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_t1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_t1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_t1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_t1;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_t1;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_t1;
if (idx_x < size0 && idx_y < size1) {
update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 7, range, 52))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(52, "update_halo_kernel1_t1");
OPS_kernels[52].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_update_halo_kernel1_t1_h ||
xdim1 != xdim1_update_halo_kernel1_t1_h ||
xdim2 != xdim2_update_halo_kernel1_t1_h ||
xdim3 != xdim3_update_halo_kernel1_t1_h ||
xdim4 != xdim4_update_halo_kernel1_t1_h ||
xdim5 != xdim5_update_halo_kernel1_t1_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_t1_h = xdim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_t1_h = xdim1;
hipMemcpyToSymbol(xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_t1_h = xdim2;
hipMemcpyToSymbol(xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_t1_h = xdim3;
hipMemcpyToSymbol(xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_t1_h = xdim4;
hipMemcpyToSymbol(xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_t1_h = xdim5;
}
int *arg6h = (int *)arg6.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
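// Illustrative grid sizing (comment added for clarity, assuming
// OPS_block_size_x = 32 as an example value): for x_size = 100, grid.x is
// (100 - 1) / 32 + 1 = 4 blocks, enough to cover the range; the bounds check
// inside the kernel discards the surplus threads.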
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args, 7, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[52].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel1_t1), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (int *)arg6.data_d, x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[52].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[52].mpi_time += t2 - t1;
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 52;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 52;
for (int i = 0; i < 4; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg *)malloc(7 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg6.data, NUM_FIELDS * sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(52, "update_halo_kernel1_t1");
}
ops_enqueue_kernel(desc);
}
#endif
| 705db291669438be2ab2f92f37b7c7bf1c21f83e.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_t1;
int xdim0_update_halo_kernel1_t1_h = -1;
__constant__ int xdim1_update_halo_kernel1_t1;
int xdim1_update_halo_kernel1_t1_h = -1;
__constant__ int xdim2_update_halo_kernel1_t1;
int xdim2_update_halo_kernel1_t1_h = -1;
__constant__ int xdim3_update_halo_kernel1_t1;
int xdim3_update_halo_kernel1_t1_h = -1;
__constant__ int xdim4_update_halo_kernel1_t1;
int xdim4_update_halo_kernel1_t1_h = -1;
__constant__ int xdim5_update_halo_kernel1_t1;
int xdim5_update_halo_kernel1_t1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x, y) (x + xdim0_update_halo_kernel1_t1 * (y))
#define OPS_ACC1(x, y) (x + xdim1_update_halo_kernel1_t1 * (y))
#define OPS_ACC2(x, y) (x + xdim2_update_halo_kernel1_t1 * (y))
#define OPS_ACC3(x, y) (x + xdim3_update_halo_kernel1_t1 * (y))
#define OPS_ACC4(x, y) (x + xdim4_update_halo_kernel1_t1 * (y))
#define OPS_ACC5(x, y) (x + xdim5_update_halo_kernel1_t1 * (y))
// user function
__device__
inline void
update_halo_kernel1_t1_gpu(double *density0, double *energy0,
double *energy1, double *u, double *p,
double *sd, const int *fields) {
if (fields[FIELD_DENSITY] == 1)
density0[OPS_ACC0(0, 0)] = density0[OPS_ACC0(0, -1)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC1(0, 0)] = energy0[OPS_ACC1(0, -1)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC2(0, 0)] = energy1[OPS_ACC2(0, -1)];
if (fields[FIELD_U] == 1)
u[OPS_ACC3(0, 0)] = u[OPS_ACC3(0, -1)];
if (fields[FIELD_P] == 1)
p[OPS_ACC4(0, 0)] = p[OPS_ACC4(0, -1)];
if (fields[FIELD_SD] == 1)
sd[OPS_ACC5(0, 0)] = sd[OPS_ACC5(0, -1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void
ops_update_halo_kernel1_t1(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
const int *__restrict arg6, int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_t1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_t1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_t1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_t1;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_t1;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_t1;
if (idx_x < size0 && idx_y < size1) {
update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 7, range, 52))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(52, "update_halo_kernel1_t1");
OPS_kernels[52].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_update_halo_kernel1_t1_h ||
xdim1 != xdim1_update_halo_kernel1_t1_h ||
xdim2 != xdim2_update_halo_kernel1_t1_h ||
xdim3 != xdim3_update_halo_kernel1_t1_h ||
xdim4 != xdim4_update_halo_kernel1_t1_h ||
xdim5 != xdim5_update_halo_kernel1_t1_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_t1_h = xdim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_t1_h = xdim1;
cudaMemcpyToSymbol(xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_t1_h = xdim2;
cudaMemcpyToSymbol(xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_t1_h = xdim3;
cudaMemcpyToSymbol(xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_t1_h = xdim4;
cudaMemcpyToSymbol(xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_t1_h = xdim5;
}
int *arg6h = (int *)arg6.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
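// (comment added for clarity) The fields[] flags held in arg6 are staged into
// the OPS host constants buffer above and copied to the device here, so the
// kernel receives them through arg6.data_d.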
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args, 7, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[52].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel1_t1<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (int *)arg6.data_d, x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[52].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[52].mpi_time += t2 - t1;
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 52;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 52;
for (int i = 0; i < 4; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg *)malloc(7 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
memcpy(tmp, arg6.data, NUM_FIELDS * sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(52, "update_halo_kernel1_t1");
}
ops_enqueue_kernel(desc);
}
#endif
|
06c6b739357c78492f002aa7e734147c7cbe391c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "common.h"
#include "radix.h"
#include "efficient.h"
#include "device_launch_parameters.h"
#include <math.h>
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define MAX_BIT 6
#define blockSize 128
int* data;
int* output;
int* B;
int* E;
int* F;
int* T;
int* D;
namespace StreamCompaction {
namespace Radix {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void compute_b_e(int n, int position, int* input, int* B, int* E) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int value = input[index];
int bit = (value >> position) & 1;
if (bit == 0) {
B[index] = 0;
E[index] = 1;
}
else {
B[index] = 1;
E[index] = 0;
}
}
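// Worked example (comment added for clarity): for value = 5 (binary 101) and
// position = 1, (5 >> 1) & 1 == 0, so B[index] = 0 and E[index] = 1; for
// position = 0 the extracted bit is 1, so B[index] = 1 and E[index] = 0.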
__global__ void compute_total_falses(int n, int* totalFalses, int* E, int* F) {
*totalFalses = E[n - 1] + F[n - 1];
}
__global__ void compute_t(int n, int* F, int* totalFalses, int* T) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
T[index] = index - F[index] + (*totalFalses);
}
__global__ void compute_d(int n, int* B, int* T, int* F, int* D) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
D[index] = B[index] ? T[index] : F[index];
}
__global__ void scatter(int n, int* indices, int* inp, int* op) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
op[indices[index]] = inp[index];
}
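// (comment added for clarity) Together these kernels implement one split pass
// of the LSD radix sort: E marks keys whose current bit is 0, F is the
// exclusive scan of E (their compacted positions at the front), totalFalses is
// the number of such keys, T[index] = index - F[index] + totalFalses gives the
// destination of keys whose bit is 1, and D selects T or F per element before
// the scatter.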
void sort(int n, int* odata, int*idata) {
hipMalloc((void**)&data, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc data failed!");
hipMalloc((void**)&output, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc output failed!");
hipMemcpy(data, idata, sizeof(int) * n, hipMemcpyHostToDevice);
hipMalloc((void**)&B, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc B failed!");
hipMalloc((void**)&E, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc E failed!");
hipMalloc((void**)&F, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc F failed!");
hipMalloc((void**)&T, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc T failed!");
hipMalloc((void**)&D, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc D failed!");
int* totalFalses;
hipMalloc((void**)&totalFalses, sizeof(int));
checkCUDAErrorWithLine("hipMalloc totalFalses failed!");
timer().startGpuTimer();
for (int i = 0; i < MAX_BIT; i++) {
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
compute_b_e << <fullBlocksPerGrid, blockSize >>> (n, i, data, B, E);
//Scan E and store in F
StreamCompaction::Efficient::scan_device(n, F, E, blockSize);
compute_total_falses << <1,1>> > (n, totalFalses, E, F);
compute_t << <fullBlocksPerGrid, blockSize >> > (n, F, totalFalses, T);
compute_d << <fullBlocksPerGrid, blockSize >> > (n, B, T, F, D);
//Scatter as per D
scatter << <fullBlocksPerGrid, blockSize >> > (n, D, data, output);
//Copy output back to input
hipMemcpy(data, output, sizeof(int) * n, hipMemcpyDeviceToDevice);
}
timer().endGpuTimer();
hipMemcpy(odata, output, sizeof(int) * n, hipMemcpyDeviceToHost);
}
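// Illustrative usage (not part of the original file): with MAX_BIT = 6 the
// sort only orders keys that fit in 6 bits (0..63), e.g.
//   int in[8] = {5, 63, 7, 1, 0, 6, 2, 4};
//   int out[8];
//   StreamCompaction::Radix::sort(8, out, in); // out -> {0,1,2,4,5,6,7,63}
// Note that the device buffers allocated above (data, output, B, E, F, T, D,
// totalFalses) are not freed before returning.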
}
} | 06c6b739357c78492f002aa7e734147c7cbe391c.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "common.h"
#include "radix.h"
#include "efficient.h"
#include "device_launch_parameters.h"
#include <math.h>
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define MAX_BIT 6
#define blockSize 128
int* data;
int* output;
int* B;
int* E;
int* F;
int* T;
int* D;
namespace StreamCompaction {
namespace Radix {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void compute_b_e(int n, int position, int* input, int* B, int* E) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
int value = input[index];
int bit = (value >> position) & 1;
if (bit == 0) {
B[index] = 0;
E[index] = 1;
}
else {
B[index] = 1;
E[index] = 0;
}
}
__global__ void compute_total_falses(int n, int* totalFalses, int* E, int* F) {
*totalFalses = E[n - 1] + F[n - 1];
}
__global__ void compute_t(int n, int* F, int* totalFalses, int* T) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
T[index] = index - F[index] + (*totalFalses);
}
__global__ void compute_d(int n, int* B, int* T, int* F, int* D) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
D[index] = B[index] ? T[index] : F[index];
}
__global__ void scatter(int n, int* indices, int* inp, int* op) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
op[indices[index]] = inp[index];
}
void sort(int n, int* odata, int*idata) {
cudaMalloc((void**)&data, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc data failed!");
cudaMalloc((void**)&output, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc output failed!");
cudaMemcpy(data, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
cudaMalloc((void**)&B, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc B failed!");
cudaMalloc((void**)&E, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc E failed!");
cudaMalloc((void**)&F, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc F failed!");
cudaMalloc((void**)&T, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc T failed!");
cudaMalloc((void**)&D, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc D failed!");
int* totalFalses;
cudaMalloc((void**)&totalFalses, sizeof(int));
checkCUDAErrorWithLine("cudaMalloc totalFalses failed!");
timer().startGpuTimer();
for (int i = 0; i < MAX_BIT; i++) {
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
compute_b_e << <fullBlocksPerGrid, blockSize >>> (n, i, data, B, E);
//Scan E and store in F
StreamCompaction::Efficient::scan_device(n, F, E, blockSize);
compute_total_falses << <1,1>> > (n, totalFalses, E, F);
compute_t << <fullBlocksPerGrid, blockSize >> > (n, F, totalFalses, T);
compute_d << <fullBlocksPerGrid, blockSize >> > (n, B, T, F, D);
//Scatter as per D
scatter << <fullBlocksPerGrid, blockSize >> > (n, D, data, output);
//Copy output back to input
cudaMemcpy(data, output, sizeof(int) * n, cudaMemcpyDeviceToDevice);
}
timer().endGpuTimer();
cudaMemcpy(odata, output, sizeof(int) * n, cudaMemcpyDeviceToHost);
}
}
} |
43a2d7e8c2bb25f8590d01134d874f5be6b69b85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
// a contains the image, b contains the kernel filter, and r is the matrix where the answer will be returned
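// Each thread computes one output pixel as the weighted sum of a kernel_size x kernel_size
// window of the image with the filter coefficients.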
__global__ void kernelConvolution(double *a, double *b, double *r, int size_col, int size_row, int size, int kernel_size) {
int i = threadIdx.x+blockDim.x*blockIdx.x;
int j = threadIdx.y+blockDim.y*blockIdx.y;
int k,l;
int pos = i+(j*size_col);
double sum = 0;
int cont = 0;
	// Iterate through the kernel filter
for (l = 0; l < kernel_size; l++){
for (k = 0; k < kernel_size; k++){
sum += a[(pos+l)+(k*size_row)] * b[cont];
cont++;
}
}
// Save the value of the pixel in the vector r "result"
r[i+(j*size_row)] = sum;
}
float convolutionDeviceKernel(double *a, double *b, double *r, int size_col, int size_row, int kernel_size) {
double *aD, *bD,*rD;
int size=size_col*size_row*sizeof(double);
int size_k = kernel_size*kernel_size*sizeof(double);
hipEvent_t start, stop;
float t;
// Define the dimensions of the blocks and kernels
dim3 bloques((int)floor((double)size_row/kernel_size*1.0),(int)floor((double)size_col/kernel_size*1.0));
dim3 hilos(kernel_size,kernel_size);
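	// Launch geometry: each block is kernel_size x kernel_size threads (9 threads for a 3x3 filter),
	// and the grid has floor(size_row/kernel_size) x floor(size_col/kernel_size) blocks.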
hipEventCreate(&start); hipEventCreate(&stop);
hipMalloc(&aD, size);hipMalloc(&rD, size); hipMalloc(&bD, size_k);
// Obtain the values for a and b from the host
hipMemcpy(aD, a, size, hipMemcpyDefault);
hipMemcpy(bD, b, size_k , hipMemcpyDefault);
// Start counting the execution time
hipEventRecord(start, 0);
	// Convolution
hipLaunchKernelGGL(( kernelConvolution), dim3(bloques), dim3(hilos), 0, 0, aD, bD, rD, size_col,size_row,size,kernel_size);
// Stop counting the execution time
hipEventRecord(stop, 0);
// Copy to host
hipMemcpy(a, aD,size, hipMemcpyDefault);
hipMemcpy(b, bD,size_k, hipMemcpyDefault);
hipMemcpy(r, rD,size, hipMemcpyDefault);
// Free memory
hipFree(aD); hipFree(bD); hipFree(rD);
hipEventSynchronize(stop);
hipEventElapsedTime(&t, start, stop);
hipEventDestroy(start); hipEventDestroy(stop);
// Return the time elapsed
return t;
}
////////////// MAIN ///////////////////////////////////
int main(int argc, char **argv) {
int size_col;
int size_row;
int d=0, kl=0;
int size_k = 9*sizeof(double);
float t;
int i,j;
double *tmp;
double *tmp2;
// a=image, b=kernel filter, r = result image
double *a,*b,*r;
// Select the parameters
int image_select = 2;
int kernel_select = 3;
int noIterations = 10;
// Receive parameters from the console
if (argc > 1){
d = atoi(argv[1]);
}
if (argc > 2){
image_select = atoi(argv[2]);
}
if (argc > 3){
kernel_select = atoi(argv[3]);
}
// Select in which card the code is going to be executed
hipSetDevice(d);
// Select the image
char name[13];
if (image_select == 1){
strcpy(name,"lena.txt");
size_col = 512;
size_row = 512;
}else if (image_select == 2){
strcpy(name,"tran.txt");
size_col = 1200;
size_row = 800;
}else if(image_select == 3){
strcpy(name,"land.txt");
size_col = 3840;
size_row = 2160;
}
// Allocate memory in the device
int size = size_col*size_row*sizeof(double);
// Image
hipHostMalloc(&a, size , hipHostMallocDefault);
// Kernel Filter
hipHostMalloc(&b, size_k , hipHostMallocDefault);
// Result
hipHostMalloc(&r, size , hipHostMallocDefault);
// Load the kernel filter
int kernel_size = 3;
double kernel[kernel_size][kernel_size];
srand(time(NULL));
if(kernel_select == 1){
//CREATE A KERNEL FILTER WITH RANDOM VALUES
double *temp3=b;
for(i = 0; i<kernel_size; i++){
for(j = 0; j<kernel_size; j++){
*temp3 = (rand()%100) /100.00;
temp3++;
}
}
}else if(kernel_select ==2){
//LAPLACIAN KERNEL FILTER
double kernel_vec[9] ={0,-1,0,-1,4,-1,0,-1,0};
double *temp3 =b;
for(i =0;i<9;i++){
*temp3 = kernel_vec[i];
temp3++;
}
}else if(kernel_select == 3){
//LOW PASS KERNEL FILTER
double kernel_vec[9] ={.1111,.1111,.1111,.1111,.1111,.1111,.1111,.1111,.1111};
double *temp3 = b;
for(i =0;i<9;i++){
*temp3 = kernel_vec[i];
temp3++;
}
}
	// Reading the image from file
FILE *img;
img = fopen(name,"r");
if(img==NULL){
printf("Error loading image\n");
return 0;
}
// Scan image
for(i=0, tmp=a; i<size_col*size_row;++i){
fscanf(img,"%lf",tmp++);
}
// Kernel execution
float sum = 0;
float s_time = 0;
for (i=0; i<noIterations;i++){
t = convolutionDeviceKernel(a,b,r,size_col,size_row,kernel_size);
sum += t;
}
s_time = sum/noIterations;
// Print the execution time
printf("El tiempo de ejecucion es %f sg\n", s_time/1000.0);
// Write the resulting image in a CSV file
FILE *nuevo;
nuevo = fopen("filtered_image.csv","w");
if(nuevo==NULL){
printf("Error loading filter\n");
return 0;
}
for(i=1, tmp2=r; i<size_col*size_row;++i){
if(i%size_col*size_row==0 && i>0){
fprintf(nuevo,"%lf",*tmp2);
fprintf(nuevo,"\n");
}else
{
fprintf(nuevo,"%lf,",*tmp2);
}
tmp2++;
}
fprintf(nuevo,"%lf",*tmp2);
// Close the files img and nuevo
fclose(img);
fclose(nuevo);
// Free memory from the device
hipHostFree(a);
hipHostFree(b);
hipHostFree(r);
}
| 43a2d7e8c2bb25f8590d01134d874f5be6b69b85.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
// a contains the image, b contains the kernel filter, and r is the matrix where the answer will be returned
__global__ void kernelConvolution(double *a, double *b, double *r, int size_col, int size_row, int size, int kernel_size) {
int i = threadIdx.x+blockDim.x*blockIdx.x;
int j = threadIdx.y+blockDim.y*blockIdx.y;
int k,l;
int pos = i+(j*size_col);
double sum = 0;
int cont = 0;
	// Iterate through the kernel filter
for (l = 0; l < kernel_size; l++){
for (k = 0; k < kernel_size; k++){
sum += a[(pos+l)+(k*size_row)] * b[cont];
cont++;
}
}
// Save the value of the pixel in the vector r "result"
r[i+(j*size_row)] = sum;
}
float convolutionDeviceKernel(double *a, double *b, double *r, int size_col, int size_row, int kernel_size) {
double *aD, *bD,*rD;
int size=size_col*size_row*sizeof(double);
int size_k = kernel_size*kernel_size*sizeof(double);
cudaEvent_t start, stop;
float t;
// Define the dimensions of the blocks and kernels
dim3 bloques((int)floor((double)size_row/kernel_size*1.0),(int)floor((double)size_col/kernel_size*1.0));
dim3 hilos(kernel_size,kernel_size);
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaMalloc(&aD, size);cudaMalloc(&rD, size); cudaMalloc(&bD, size_k);
// Obtain the values for a and b from the host
cudaMemcpy(aD, a, size, cudaMemcpyDefault);
cudaMemcpy(bD, b, size_k , cudaMemcpyDefault);
// Start counting the execution time
cudaEventRecord(start, 0);
	// Convolution
kernelConvolution<<<bloques, hilos>>>(aD, bD, rD, size_col,size_row,size,kernel_size);
// Stop counting the execution time
cudaEventRecord(stop, 0);
// Copy to host
cudaMemcpy(a, aD,size, cudaMemcpyDefault);
cudaMemcpy(b, bD,size_k, cudaMemcpyDefault);
cudaMemcpy(r, rD,size, cudaMemcpyDefault);
// Free memory
cudaFree(aD); cudaFree(bD); cudaFree(rD);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&t, start, stop);
cudaEventDestroy(start); cudaEventDestroy(stop);
// Return the time elapsed
return t;
}
////////////// MAIN ///////////////////////////////////
int main(int argc, char **argv) {
int size_col;
int size_row;
int d=0, kl=0;
int size_k = 9*sizeof(double);
float t;
int i,j;
double *tmp;
double *tmp2;
// a=image, b=kernel filter, r = result image
double *a,*b,*r;
// Select the parameters
int image_select = 2;
int kernel_select = 3;
int noIterations = 10;
// Receive parameters from the console
if (argc > 1){
d = atoi(argv[1]);
}
if (argc > 2){
image_select = atoi(argv[2]);
}
if (argc > 3){
kernel_select = atoi(argv[3]);
}
// Select in which card the code is going to be executed
cudaSetDevice(d);
// Select the image
char name[13];
if (image_select == 1){
strcpy(name,"lena.txt");
size_col = 512;
size_row = 512;
}else if (image_select == 2){
strcpy(name,"tran.txt");
size_col = 1200;
size_row = 800;
}else if(image_select == 3){
strcpy(name,"land.txt");
size_col = 3840;
size_row = 2160;
}
// Allocate memory in the device
int size = size_col*size_row*sizeof(double);
// Image
cudaHostAlloc(&a, size , cudaHostAllocDefault);
// Kernel Filter
cudaHostAlloc(&b, size_k , cudaHostAllocDefault);
// Result
cudaHostAlloc(&r, size , cudaHostAllocDefault);
// Load the kernel filter
int kernel_size = 3;
double kernel[kernel_size][kernel_size];
srand(time(NULL));
if(kernel_select == 1){
//CREATE A KERNEL FILTER WITH RANDOM VALUES
double *temp3=b;
for(i = 0; i<kernel_size; i++){
for(j = 0; j<kernel_size; j++){
*temp3 = (rand()%100) /100.00;
temp3++;
}
}
}else if(kernel_select ==2){
//LAPLACIAN KERNEL FILTER
double kernel_vec[9] ={0,-1,0,-1,4,-1,0,-1,0};
double *temp3 =b;
for(i =0;i<9;i++){
*temp3 = kernel_vec[i];
temp3++;
}
}else if(kernel_select == 3){
//LOW PASS KERNEL FILTER
double kernel_vec[9] ={.1111,.1111,.1111,.1111,.1111,.1111,.1111,.1111,.1111};
double *temp3 = b;
for(i =0;i<9;i++){
*temp3 = kernel_vec[i];
temp3++;
}
}
	// Reading the image from file
FILE *img;
img = fopen(name,"r");
if(img==NULL){
printf("Error loading image\n");
return 0;
}
// Scan image
for(i=0, tmp=a; i<size_col*size_row;++i){
fscanf(img,"%lf",tmp++);
}
// Kernel execution
float sum = 0;
float s_time = 0;
for (i=0; i<noIterations;i++){
t = convolutionDeviceKernel(a,b,r,size_col,size_row,kernel_size);
sum += t;
}
s_time = sum/noIterations;
// Print the execution time
printf("El tiempo de ejecucion es %f sg\n", s_time/1000.0);
// Write the resulting image in a CSV file
FILE *nuevo;
nuevo = fopen("filtered_image.csv","w");
if(nuevo==NULL){
printf("Error loading filter\n");
return 0;
}
for(i=1, tmp2=r; i<size_col*size_row;++i){
if(i%size_col*size_row==0 && i>0){
fprintf(nuevo,"%lf",*tmp2);
fprintf(nuevo,"\n");
}else
{
fprintf(nuevo,"%lf,",*tmp2);
}
tmp2++;
}
fprintf(nuevo,"%lf",*tmp2);
// Close the files img and nuevo
fclose(img);
fclose(nuevo);
// Free memory from the device
cudaFreeHost(a);
cudaFreeHost(b);
cudaFreeHost(r);
}
|
ae40536d3d2d536df016bd124763136d782ade65.hip | // !!! This is a file automatically generated by hipify!!!
#define LIMIT -999
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "needle.h"
#include <hip/hip_runtime.h>
#include <sys/time.h>
// includes, kernels
#include "needle_kernel.hip"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
printf("WG size of kernel = %d \n", BLOCK_SIZE);
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]);
fprintf(stderr, "\t<dimension> - x and y dimensions\n");
fprintf(stderr, "\t<penalty> - penalty(positive integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
int max_rows, max_cols, penalty;
int *input_itemsets, *output_itemsets, *referrence;
int *matrix_cuda, *referrence_cuda;
int size;
	// the lengths of the two sequences should be divisible by 16.
// And at current stage max_rows needs to equal max_cols
if (argc == 3)
{
max_rows = atoi(argv[1]);
max_cols = atoi(argv[1]);
penalty = atoi(argv[2]);
}
else{
usage(argc, argv);
}
if(atoi(argv[1])%16!=0){
fprintf(stderr,"The dimension values must be a multiple of 16\n");
exit(1);
}
max_rows = max_rows + 1;
max_cols = max_cols + 1;
// referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
// input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
// output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
hipHostMalloc((void **)&referrence,max_rows * max_cols * sizeof(int));
hipHostMalloc((void **)&input_itemsets,max_rows * max_cols * sizeof(int));
hipHostMalloc((void **)&output_itemsets,max_rows * max_cols * sizeof(int));
if (!input_itemsets)
fprintf(stderr, "error: can not allocate memory");
srand ( 7 );
for (int i = 0 ; i < max_cols; i++){
for (int j = 0 ; j < max_rows; j++){
input_itemsets[i*max_cols+j] = 0;
}
}
printf("Start Needleman-Wunsch\n");
for( int i=1; i< max_rows ; i++){ //please define your own sequence.
input_itemsets[i*max_cols] = rand() % 10 + 1;
}
for( int j=1; j< max_cols ; j++){ //please define your own sequence.
input_itemsets[j] = rand() % 10 + 1;
}
for (int i = 1 ; i < max_cols; i++){
for (int j = 1 ; j < max_rows; j++){
referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
}
}
for( int i = 1; i< max_rows ; i++)
input_itemsets[i*max_cols] = -i * penalty;
for( int j = 1; j< max_cols ; j++)
input_itemsets[j] = -j * penalty;
size = max_cols * max_rows;
hipMalloc((void**)& referrence_cuda, sizeof(int)*size);
hipMalloc((void**)& matrix_cuda, sizeof(int)*size);
hipMemcpy(referrence_cuda, referrence, sizeof(int) * size, hipMemcpyHostToDevice);
hipMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, hipMemcpyHostToDevice);
dim3 dimGrid;
dim3 dimBlock(BLOCK_SIZE, 1);
int block_width = ( max_cols - 1 )/BLOCK_SIZE;
printf("Processing top-left matrix\n");
//process top-left matrix
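	// The score matrix is filled in anti-diagonal waves: each BLOCK_SIZE x BLOCK_SIZE tile depends
	// only on its north, west and north-west neighbours, so the i tiles on the i-th block diagonal
	// are independent and are processed by a single kernel launch with a grid of i blocks.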
for( int i = 1 ; i <= block_width ; i++){
dimGrid.x = i;
dimGrid.y = 1;
hipLaunchKernelGGL(( needle_cuda_shared_1), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda
,max_cols, penalty, i, block_width);
}
printf("Processing bottom-right matrix\n");
//process bottom-right matrix
for( int i = block_width - 1 ; i >= 1 ; i--){
dimGrid.x = i;
dimGrid.y = 1;
hipLaunchKernelGGL(( needle_cuda_shared_2), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda
,max_cols, penalty, i, block_width);
}
hipMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, hipMemcpyDeviceToHost);
//#define TRACEBACK
#ifdef TRACEBACK
FILE *fpo = fopen("result.txt","w");
fprintf(fpo, "print traceback value GPU:\n");
	for (int i = max_rows - 2, j = max_rows - 2; i >= 0 && j >= 0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 )
fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
//traceback = maximum(nw, w, n);
int new_nw, new_w, new_n;
new_nw = nw + referrence[i * max_cols + j];
new_w = w - penalty;
new_n = n - penalty;
traceback = maximum(new_nw, new_w, new_n);
if(traceback == new_nw)
traceback = nw;
if(traceback == new_w)
traceback = w;
if(traceback == new_n)
traceback = n;
fprintf(fpo, "%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;}
else
;
}
fclose(fpo);
#endif
hipFree(referrence_cuda);
hipFree(matrix_cuda);
hipFree(referrence);
hipFree(input_itemsets);
hipFree(output_itemsets);
}
| ae40536d3d2d536df016bd124763136d782ade65.cu | #define LIMIT -999
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "needle.h"
#include <cuda.h>
#include <sys/time.h>
// includes, kernels
#include "needle_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
printf("WG size of kernel = %d \n", BLOCK_SIZE);
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]);
fprintf(stderr, "\t<dimension> - x and y dimensions\n");
fprintf(stderr, "\t<penalty> - penalty(positive integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
int max_rows, max_cols, penalty;
int *input_itemsets, *output_itemsets, *referrence;
int *matrix_cuda, *referrence_cuda;
int size;
	// the lengths of the two sequences should be divisible by 16.
// And at current stage max_rows needs to equal max_cols
if (argc == 3)
{
max_rows = atoi(argv[1]);
max_cols = atoi(argv[1]);
penalty = atoi(argv[2]);
}
else{
usage(argc, argv);
}
if(atoi(argv[1])%16!=0){
fprintf(stderr,"The dimension values must be a multiple of 16\n");
exit(1);
}
max_rows = max_rows + 1;
max_cols = max_cols + 1;
// referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
// input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
// output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
cudaMallocHost((void **)&referrence,max_rows * max_cols * sizeof(int));
cudaMallocHost((void **)&input_itemsets,max_rows * max_cols * sizeof(int));
cudaMallocHost((void **)&output_itemsets,max_rows * max_cols * sizeof(int));
if (!input_itemsets)
fprintf(stderr, "error: can not allocate memory");
srand ( 7 );
for (int i = 0 ; i < max_cols; i++){
for (int j = 0 ; j < max_rows; j++){
input_itemsets[i*max_cols+j] = 0;
}
}
printf("Start Needleman-Wunsch\n");
for( int i=1; i< max_rows ; i++){ //please define your own sequence.
input_itemsets[i*max_cols] = rand() % 10 + 1;
}
for( int j=1; j< max_cols ; j++){ //please define your own sequence.
input_itemsets[j] = rand() % 10 + 1;
}
for (int i = 1 ; i < max_cols; i++){
for (int j = 1 ; j < max_rows; j++){
referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
}
}
for( int i = 1; i< max_rows ; i++)
input_itemsets[i*max_cols] = -i * penalty;
for( int j = 1; j< max_cols ; j++)
input_itemsets[j] = -j * penalty;
size = max_cols * max_rows;
cudaMalloc((void**)& referrence_cuda, sizeof(int)*size);
cudaMalloc((void**)& matrix_cuda, sizeof(int)*size);
cudaMemcpy(referrence_cuda, referrence, sizeof(int) * size, cudaMemcpyHostToDevice);
cudaMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, cudaMemcpyHostToDevice);
dim3 dimGrid;
dim3 dimBlock(BLOCK_SIZE, 1);
int block_width = ( max_cols - 1 )/BLOCK_SIZE;
printf("Processing top-left matrix\n");
//process top-left matrix
for( int i = 1 ; i <= block_width ; i++){
dimGrid.x = i;
dimGrid.y = 1;
needle_cuda_shared_1<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda
,max_cols, penalty, i, block_width);
}
printf("Processing bottom-right matrix\n");
//process bottom-right matrix
for( int i = block_width - 1 ; i >= 1 ; i--){
dimGrid.x = i;
dimGrid.y = 1;
needle_cuda_shared_2<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda
,max_cols, penalty, i, block_width);
}
cudaMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, cudaMemcpyDeviceToHost);
//#define TRACEBACK
#ifdef TRACEBACK
FILE *fpo = fopen("result.txt","w");
fprintf(fpo, "print traceback value GPU:\n");
	for (int i = max_rows - 2, j = max_rows - 2; i >= 0 && j >= 0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 )
fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
//traceback = maximum(nw, w, n);
int new_nw, new_w, new_n;
new_nw = nw + referrence[i * max_cols + j];
new_w = w - penalty;
new_n = n - penalty;
traceback = maximum(new_nw, new_w, new_n);
if(traceback == new_nw)
traceback = nw;
if(traceback == new_w)
traceback = w;
if(traceback == new_n)
traceback = n;
fprintf(fpo, "%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;}
else
;
}
fclose(fpo);
#endif
cudaFree(referrence_cuda);
cudaFree(matrix_cuda);
cudaFree(referrence);
cudaFree(input_itemsets);
cudaFree(output_itemsets);
}
|
f2e83a06c69b59bc3eb794de82b25543e153e7c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
using namespace std;
void queryDeviceInformation(void) {
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count);
for (int i=0; i< count; i++) {
hipGetDeviceProperties(&prop, i);
cout << "--- General Information for device " << i << endl << endl;
cout << "Name: " << prop.name << endl;
cout << "Compute capability: " << prop.major << "." << prop.minor << endl;
cout << "Clock rate: " << prop.clockRate << endl;
cout << "Device copy overlap: ";
if (prop.deviceOverlap)
cout << "Enabled " << endl;
else
cout << "Disabled" << endl;
cout << "Kernel execution timeout: ";
if (prop.kernelExecTimeoutEnabled)
cout << "Enabled " << endl;
else
cout << "Disabled" << endl;
cout << endl;
cout << "--- Memory Information for device " << i << endl << endl;
cout << "Total global memory: " << prop.totalGlobalMem << endl;
cout << "Total constant memory: " << prop.totalConstMem << endl;
cout << "Max mem pitch: " << prop.memPitch << endl;
cout << "Texture Alignment: " << prop.textureAlignment << endl;
cout << endl;
cout << "--- MP Information for device " << i << endl << endl;
cout << "Multiprocessor count: " << prop.multiProcessorCount << endl;
cout << "Shared memory per block: " << prop.sharedMemPerBlock << endl;
cout << "Registers per block: " << prop.regsPerBlock << endl;
cout << "Threads in warp: " << prop.warpSize << endl;
cout << "Max threads per block: " << prop.maxThreadsPerBlock << endl;
cout << "Max thread dimensions: " << prop.maxThreadsDim[0] << ", "
<< prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << endl;
cout << "Max grid dimensions: " << prop.maxGridSize[0] << ", "
<< prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << endl;
}
}
| f2e83a06c69b59bc3eb794de82b25543e153e7c7.cu | #include <iostream>
using namespace std;
void queryDeviceInformation(void) {
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
for (int i=0; i< count; i++) {
cudaGetDeviceProperties(&prop, i);
cout << "--- General Information for device " << i << endl << endl;
cout << "Name: " << prop.name << endl;
cout << "Compute capability: " << prop.major << "." << prop.minor << endl;
cout << "Clock rate: " << prop.clockRate << endl;
cout << "Device copy overlap: ";
if (prop.deviceOverlap)
cout << "Enabled " << endl;
else
cout << "Disabled" << endl;
cout << "Kernel execution timeout: ";
if (prop.kernelExecTimeoutEnabled)
cout << "Enabled " << endl;
else
cout << "Disabled" << endl;
cout << endl;
cout << "--- Memory Information for device " << i << endl << endl;
cout << "Total global memory: " << prop.totalGlobalMem << endl;
cout << "Total constant memory: " << prop.totalConstMem << endl;
cout << "Max mem pitch: " << prop.memPitch << endl;
cout << "Texture Alignment: " << prop.textureAlignment << endl;
cout << endl;
cout << "--- MP Information for device " << i << endl << endl;
cout << "Multiprocessor count: " << prop.multiProcessorCount << endl;
cout << "Shared memory per block: " << prop.sharedMemPerBlock << endl;
cout << "Registers per block: " << prop.regsPerBlock << endl;
cout << "Threads in warp: " << prop.warpSize << endl;
cout << "Max threads per block: " << prop.maxThreadsPerBlock << endl;
cout << "Max thread dimensions: " << prop.maxThreadsDim[0] << ", "
<< prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << endl;
cout << "Max grid dimensions: " << prop.maxGridSize[0] << ", "
<< prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << endl;
}
}
|
d7120f5b73c5ee98ba6243bfd134b092c2518044.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer_include/vision_layers.hpp"
// find a max class value from all examples
template <typename Dtype>
__global__ void MaxClassKernel(const int outer_num, const int classes, const int inner_num,
const Dtype* bottom_data, Dtype* scale_data){
CUDA_KERNEL_LOOP(idx, outer_num*inner_num){
int o_idx = idx / inner_num;
int i_idx = idx% inner_num;
Dtype max_val = -FLT_MAX;
for (int c = 0; c < classes; c++)
max_val = max(bottom_data[(o_idx*classes + c)*inner_num + i_idx], max_val);
scale_data[idx] = max_val;
}
}
template <typename Dtype>
__global__ void SubtractKernel(const int count, const int classes, const int inner_num,
const Dtype* scale_data, Dtype* top_data){
// count=outer_num*classes*inner_num
CUDA_KERNEL_LOOP(idx, count){
int o_idx = idx / inner_num / classes;
int i_idx = idx% inner_num;
// ignore classes
		// note that scale_data shape is [outer_num,inner_num]
top_data[idx] -= scale_data[o_idx*inner_num + i_idx];
}
}
template <typename Dtype>
__global__ void ExpKernel(const int count, Dtype* top_data){
CUDA_KERNEL_LOOP(idx,count){
top_data[idx] = exp(top_data[idx]);
}
}
template <typename Dtype>
__global__ void SumClassKernel(const int outer_num, const int classes, const int inner_num,
const Dtype* top_data, Dtype* scale_data){
CUDA_KERNEL_LOOP(idx, outer_num*inner_num){
int o_idx = idx / inner_num;
int i_idx = idx% inner_num;
Dtype sum = 0;
for (int c = 0; c < classes; c++)
sum += top_data[(o_idx*classes + c)*inner_num + i_idx];
scale_data[idx] = sum;
}
}
template <typename Dtype>
__global__ void DivKernel(const int count, const int classes, const int inner_num,
const Dtype* scale_data, Dtype* top_data){
// count=outer_num*classes*inner_num
CUDA_KERNEL_LOOP(idx, count){
int o_idx = idx / inner_num / classes;
int i_idx = idx% inner_num;
// ignore classes
		// note that scale_data shape is [outer_num,inner_num]
top_data[idx] /= scale_data[o_idx*inner_num + i_idx];
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::forward_gpu(const vector<Blob<Dtype>*> &bottom, const vector<Blob<Dtype>*> &top){
const Dtype *bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale.mutable_gpu_data();
// num_class
const int classes = bottom[0]->shape(axis);
const int count = bottom[0]->count();
	// normally the dim equals classes,
	// but if we do not connect an inner product layer beforehand
	// we may get a 4D input and dim=classes*height*width
dragon_gpu_copy(count, top_data, bottom_data);
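	// Numerically stable softmax: subtract the per-position max over classes, exponentiate,
	// then divide by the per-position sum, i.e. top = exp(x - max_c x) / sum_c exp(x - max_c x).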
MaxClassKernel<Dtype><< <GET_BLOCKS(inner_num*outer_num), CUDA_NUM_THREADS >> >(
outer_num, classes, inner_num, bottom_data, scale_data);
SubtractKernel<Dtype> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(
count, classes, inner_num, scale_data, top_data);
ExpKernel<Dtype> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, top_data);
SumClassKernel<Dtype> << <GET_BLOCKS(inner_num*outer_num), CUDA_NUM_THREADS >> >(
outer_num, classes, inner_num, top_data, scale_data);
DivKernel<Dtype> << <GET_BLOCKS(count),CUDA_NUM_THREADS >> >(
count, classes, inner_num, scale_data, top_data);
}
template <typename Dtype>
__global__ void DotKernel(const int outer_num, const int classes, const int inner_num,
const Dtype* top_diff, const Dtype* top_data,Dtype* scale_data){
CUDA_KERNEL_LOOP(idx, outer_num*inner_num){
int o_idx = idx / inner_num;
int i_idx = idx% inner_num;
Dtype dot = 0;
for (int c = 0; c < classes; c++)
dot += (top_data[(o_idx*classes + c)*inner_num + i_idx]
* top_diff[(o_idx*classes + c)*inner_num + i_idx]);
scale_data[idx] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::backward_gpu(const vector<Blob<Dtype>*> &top, const vector<bool> &data_need_bp, const vector<Blob<Dtype>*> &bottom){
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale.mutable_gpu_data();
int classes = top[0]->shape(axis);
int count = top[0]->count() / outer_num;
dragon_gpu_copy(count, bottom_diff, top_diff);
	// the softmax and loss layers are split in Caffe;
	// see https://www.zhihu.com/question/28927103 for background
// for each example
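	// Gradient of softmax: bottom_diff_c = top_data_c * (top_diff_c - sum_k top_diff_k * top_data_k).
	// DotKernel computes the per-position dot product, SubtractKernel removes it from the copied
	// top_diff, and the final element-wise multiply by top_data completes the expression.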
DotKernel<Dtype> << <GET_BLOCKS(inner_num*outer_num), CUDA_NUM_THREADS >> >(
outer_num, classes, inner_num, top_diff,top_data, scale_data);
SubtractKernel<Dtype> << <GET_BLOCKS(count),CUDA_NUM_THREADS >> >(
count, classes, inner_num, scale_data, bottom_diff);
dragon_gpu_mul(count, bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); | d7120f5b73c5ee98ba6243bfd134b092c2518044.cu | #include "layer_include/vision_layers.hpp"
// find a max class value from all examples
template <typename Dtype>
__global__ void MaxClassKernel(const int outer_num, const int classes, const int inner_num,
const Dtype* bottom_data, Dtype* scale_data){
CUDA_KERNEL_LOOP(idx, outer_num*inner_num){
int o_idx = idx / inner_num;
int i_idx = idx% inner_num;
Dtype max_val = -FLT_MAX;
for (int c = 0; c < classes; c++)
max_val = max(bottom_data[(o_idx*classes + c)*inner_num + i_idx], max_val);
scale_data[idx] = max_val;
}
}
template <typename Dtype>
__global__ void SubtractKernel(const int count, const int classes, const int inner_num,
const Dtype* scale_data, Dtype* top_data){
// count=outer_num*classes*inner_num
CUDA_KERNEL_LOOP(idx, count){
int o_idx = idx / inner_num / classes;
int i_idx = idx% inner_num;
// ignore classes
		// note that scale_data shape is [outer_num,inner_num]
top_data[idx] -= scale_data[o_idx*inner_num + i_idx];
}
}
template <typename Dtype>
__global__ void ExpKernel(const int count, Dtype* top_data){
CUDA_KERNEL_LOOP(idx,count){
top_data[idx] = exp(top_data[idx]);
}
}
template <typename Dtype>
__global__ void SumClassKernel(const int outer_num, const int classes, const int inner_num,
const Dtype* top_data, Dtype* scale_data){
CUDA_KERNEL_LOOP(idx, outer_num*inner_num){
int o_idx = idx / inner_num;
int i_idx = idx% inner_num;
Dtype sum = 0;
for (int c = 0; c < classes; c++)
sum += top_data[(o_idx*classes + c)*inner_num + i_idx];
scale_data[idx] = sum;
}
}
template <typename Dtype>
__global__ void DivKernel(const int count, const int classes, const int inner_num,
const Dtype* scale_data, Dtype* top_data){
// count=outer_num*classes*inner_num
CUDA_KERNEL_LOOP(idx, count){
int o_idx = idx / inner_num / classes;
int i_idx = idx% inner_num;
// ignore classes
		// note that scale_data shape is [outer_num,inner_num]
top_data[idx] /= scale_data[o_idx*inner_num + i_idx];
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::forward_gpu(const vector<Blob<Dtype>*> &bottom, const vector<Blob<Dtype>*> &top){
const Dtype *bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale.mutable_gpu_data();
// num_class
const int classes = bottom[0]->shape(axis);
const int count = bottom[0]->count();
	// normally the dim equals classes,
	// but if we do not connect an inner product layer beforehand
	// we may get a 4D input and dim=classes*height*width
dragon_gpu_copy(count, top_data, bottom_data);
MaxClassKernel<Dtype><< <GET_BLOCKS(inner_num*outer_num), CUDA_NUM_THREADS >> >(
outer_num, classes, inner_num, bottom_data, scale_data);
SubtractKernel<Dtype> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(
count, classes, inner_num, scale_data, top_data);
ExpKernel<Dtype> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, top_data);
SumClassKernel<Dtype> << <GET_BLOCKS(inner_num*outer_num), CUDA_NUM_THREADS >> >(
outer_num, classes, inner_num, top_data, scale_data);
DivKernel<Dtype> << <GET_BLOCKS(count),CUDA_NUM_THREADS >> >(
count, classes, inner_num, scale_data, top_data);
}
template <typename Dtype>
__global__ void DotKernel(const int outer_num, const int classes, const int inner_num,
const Dtype* top_diff, const Dtype* top_data,Dtype* scale_data){
CUDA_KERNEL_LOOP(idx, outer_num*inner_num){
int o_idx = idx / inner_num;
int i_idx = idx% inner_num;
Dtype dot = 0;
for (int c = 0; c < classes; c++)
dot += (top_data[(o_idx*classes + c)*inner_num + i_idx]
* top_diff[(o_idx*classes + c)*inner_num + i_idx]);
scale_data[idx] = dot;
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::backward_gpu(const vector<Blob<Dtype>*> &top, const vector<bool> &data_need_bp, const vector<Blob<Dtype>*> &bottom){
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale.mutable_gpu_data();
int classes = top[0]->shape(axis);
int count = top[0]->count() / outer_num;
dragon_gpu_copy(count, bottom_diff, top_diff);
// softmax and loss layer is splitted in Caffe
// please read https://www.zhihu.com/question/28927103 before
// for each example
DotKernel<Dtype> << <GET_BLOCKS(inner_num*outer_num), CUDA_NUM_THREADS >> >(
outer_num, classes, inner_num, top_diff,top_data, scale_data);
SubtractKernel<Dtype> << <GET_BLOCKS(count),CUDA_NUM_THREADS >> >(
count, classes, inner_num, scale_data, bottom_diff);
dragon_gpu_mul(count, bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); |
6824f215c5832b4b0650c02b06f9f630785e3c8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
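    // q-Pochhammer symbol: (a;q)_inf = prod_{k=0}^{inf} (1 - a*q^k), approximated here by a finite product.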
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
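    // Complex power via the polar form: for ag = R*e^{i*theta} and bg = c + d*i,
    // ag^bg = R^c * e^{-d*theta} * ( cos(d*ln R + c*theta) + i*sin(d*ln R + c*theta) ).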
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
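    // Jacobi triple-product form of theta3: prod_{n>=1} (1 - q^{2n}) (1 + 2 q^{2n-1} cos(2z) + q^{4n-2}),
    // truncated here to 10 factors.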
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
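    // Truncated Jacobi theta series: theta(z|tau) = sum_n exp(pi*i*tau*n^2 + 2*i*n*z), summed over n = -10..9 here.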
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex halva(hipComplex z)
{
hipComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex aliva(hipComplex z)
{
hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ hipComplex ariva(hipComplex z)
{
hipComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex arreg(hipComplex q, hipComplex r, hipComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
hipComplex out(0.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
hipComplex morra(-1.0,0.0);
hipComplex tla(1.0,0.0);
hipComplex vnn(0.0,0.0);
hipComplex fou(4.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex run(1.0,0.0);
int v;
for(v=0;v<20;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*sins(tw*z*run)/(run-roo);
}
return fou*out;
}
__device__ hipComplex urreg(hipComplex q, hipComplex r, hipComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
hipComplex out(0.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
hipComplex morra(-1.0,0.0);
hipComplex tla(1.0,0.0);
hipComplex vnn(0.0,0.0);
hipComplex fou(4.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex run(1.0,0.0);
int v;
for(v=0;v<10;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*the3(tw*z*run,r)/(run-roo);
}
return fou*out;
}
// * small q-exponential
__device__ hipComplex qexp(hipComplex z, hipComplex q)
{
hipComplex mone(-1.0,0.0);
hipComplex une(1.0,0.0);
return une/qpoch(z,q);
}
//* large q exponential is just qpoch(-z,q)
__device__ hipComplex qExp(hipComplex z, hipComplex q)
{
hipComplex mone(-1.0,0.0);
hipComplex une(1.0,0.0);
return qpoch(mone*z,q);
}
__device__ hipComplex sinq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qexp(z*aie,q) -qexp(z*aie,q))/doo;
return out;
}
__device__ hipComplex cosq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qexp(z*aie,q) +qexp(z*aie,q))/doo;
return out;
}
__device__ hipComplex Sinq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qExp(z*aie,q) -qExp(z*aie,q))/doo;
return out;
}
__device__ hipComplex Cosq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qExp(z*aie,q) +qExp(z*aie,q))/doo;
return out;
}
__device__ hipComplex asins(hipComplex z)
{
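    // Complex arcsine via the alpha/beta decomposition:
    // asin(z) = asin(beta) + i*sgn(Im z)*ln(alpha + sqrt(alpha^2 - 1)),
    // with alpha = (|z+1| + |z-1|)/2 and beta = (|z+1| - |z-1|)/2.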
float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float fla = z.i/abs(z.i);
    // signum of z.i computed without a comparison; there is probably a saner way to do this
hipComplex out(0.0,0.0);
out.r = asinf(bet);
out.i = fla * logf(alp + sqrtf(alp*alp-1));
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale = 2.0;
float fx = scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
hipComplex gren(2.0,0.0);
hipComplex next=flurn;
hipComplex current = cue;
hipComplex xnext = flurn;
hipComplex xcurrent = cue;
hipComplex tinny(.0001,0.0001);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
int uu;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// One way of describing this would be we want to perform Newton's method
//on the Mandelbrot set
/* preiterate */
// 3/pi
hipComplex vlan(0.954929658551372,0.0);
hipComplex LQ(sqrtf(3.0)/2.0,0.0);
for(v=0;v<30;v++)
{
//cue = cue - powc(hilva(cue)-uon,hinva(cue)-aon)/(aon-powc(hinva(cue)+aon,hilva(cue)-uon));
accume = accume * (vlan*asins(aon*LQ + uon*cue));
cue = cue * q;
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
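// Illustrative only (added, not part of the original file): a minimal host-side
// driver sketch for kernelLauncher. The helper name renderFrameSketch and the
// chosen mouse position are made up for the example; error checking is omitted.
static void renderFrameSketch() {
    uchar4 *d_buf = nullptr;
    hipMalloc(reinterpret_cast<void **>(&d_buf), DIM * DIM * sizeof(uchar4)); // device framebuffer
    int2 pos = make_int2(DIM / 2, DIM / 2);                                   // assumed mouse position
    kernelLauncher(d_buf, DIM, DIM, pos);                                     // render one frame
    hipDeviceSynchronize();                                                   // wait for completion
    hipFree(d_buf);
}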
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | 6824f215c5832b4b0650c02b06f9f630785e3c8b.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
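/* Added note: the standard q-Pochhammer symbol is (a;q)_inf = prod_{k>=0} (1 - a*q^k).
   Because the loop above starts with Q = q, it actually accumulates
   prod_{k=1}^{79} (1 - a*q^k), i.e. a truncation of (a*q;q)_inf = (a;q)_inf / (1 - a);
   the product only converges for |q| < 1, hence the early zero return when |q| > 1. */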
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
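/* Added note: with ag = R*e^(i*t) and bg = c + d*i, the code above implements the
   principal-branch identity
   (R*e^(i*t))^(c+d*i) = R^c * e^(-d*t) * [cos(d*ln R + c*t) + i*sin(d*ln R + c*t)],
   where mesp carries the R^(c+d*i) factor and frim carries e^(i*t*(c+d*i)). */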
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex halva(cuComplex z)
{
cuComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ cuComplex aliva(cuComplex z)
{
cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ cuComplex ariva(cuComplex z)
{
cuComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex arreg(cuComplex q, cuComplex r, cuComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
cuComplex out(0.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
cuComplex morra(-1.0,0.0);
cuComplex tla(1.0,0.0);
cuComplex vnn(0.0,0.0);
cuComplex fou(4.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex run(1.0,0.0);
int v;
for(v=0;v<20;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*sins(tw*z*run)/(run-roo);
}
return fou*out;
}
__device__ cuComplex urreg(cuComplex q, cuComplex r, cuComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
cuComplex out(0.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
cuComplex morra(-1.0,0.0);
cuComplex tla(1.0,0.0);
cuComplex vnn(0.0,0.0);
cuComplex fou(4.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex run(1.0,0.0);
int v;
for(v=0;v<10;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*the3(tw*z*run,r)/(run-roo);
}
return fou*out;
}
// * small q-exponential
__device__ cuComplex qexp(cuComplex z, cuComplex q)
{
cuComplex mone(-1.0,0.0);
cuComplex une(1.0,0.0);
return une/qpoch(z,q);
}
//* large q exponential is just qpoch(-z,q)
__device__ cuComplex qExp(cuComplex z, cuComplex q)
{
cuComplex mone(-1.0,0.0);
cuComplex une(1.0,0.0);
return qpoch(mone*z,q);
}
__device__ cuComplex sinq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex mone(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex doo(0.0,2.0);
// q-analogue of sine, (e_q(iz) - e_q(-iz))/(2i). The original subtracted
// qexp(z*aie,q) from itself, which is identically zero, so the second argument
// is negated here (assumed intent) and the divisor is 2i rather than 2.
out = (qexp(z*aie,q) - qexp(mone*z*aie,q))/doo;
return out;
}
__device__ cuComplex cosq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex mone(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
// q-analogue of cosine, (e_q(iz) + e_q(-iz))/2; second argument negated (assumed intent).
out = (qexp(z*aie,q) + qexp(mone*z*aie,q))/doo;
return out;
}
__device__ cuComplex Sinq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex mone(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex doo(0.0,2.0);
// same correction as sinq, built on the large q-exponential qExp
out = (qExp(z*aie,q) - qExp(mone*z*aie,q))/doo;
return out;
}
__device__ cuComplex Cosq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex mone(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
// same correction as cosq, built on the large q-exponential qExp
out = (qExp(z*aie,q) + qExp(mone*z*aie,q))/doo;
return out;
}
__device__ cuComplex asins(cuComplex z)
{
float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float fla = z.i/abs(z.i);
// *signum, but without a comparison, probably a saner way to do this? //
cuComplex out(0.0,0.0);
out.r = asinf(bet);
out.i = fla * logf(alp + sqrtf(alp*alp-1));
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale = 2.0;
float fx = scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(1.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
cuComplex gren(2.0,0.0);
cuComplex next=flurn;
cuComplex current = cue;
cuComplex xnext = flurn;
cuComplex xcurrent = cue;
cuComplex tinny(.0001,0.0001);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
int uu;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// One way of describing this would be we want to perform Newton's method
//on the Mandelbrot set
/* preiterate */
// 3/pi
cuComplex vlan(0.954929658551372,0.0);
cuComplex LQ(sqrtf(3.0)/2.0,0.0);
for(v=0;v<30;v++)
{
//cue = cue - powc(hilva(cue)-uon,hinva(cue)-aon)/(aon-powc(hinva(cue)+aon,hilva(cue)-uon));
accume = accume * (vlan*asins(aon*LQ + uon*cue));
cue = cue * q;
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
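// Illustrative only (added, not in the original source): a host-side sketch of how
// kernelLauncher could be driven; the buffer name and mouse position are assumptions
// and error checking is omitted.
static void renderFrameSketch() {
    uchar4 *d_buf = nullptr;
    cudaMalloc(reinterpret_cast<void **>(&d_buf), DIM * DIM * sizeof(uchar4)); // device framebuffer
    int2 pos = make_int2(DIM / 2, DIM / 2);                                    // assumed mouse position
    kernelLauncher(d_buf, DIM, DIM, pos);                                      // render one frame
    cudaDeviceSynchronize();                                                   // wait for completion
    cudaFree(d_buf);
}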
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
bae5082d884b37aee6a421cd5e34617aae1fedad.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <memory>
#include "debug.h"
#include "hooklib.h"
#include "protocol.hpp"
#include "slimfast.hpp"
#include "consumer.hpp"
#include "impl.hpp"
#include "devlogger.hpp"
__global__ void benchmark_memwrite(const int num_repeats, bool enabled, volatile unsigned int* data)
{
int id = ((blockDim.x * blockIdx.x) + threadIdx.x);
if(enabled)
{
for(int i = 0; i < num_repeats; ++i )
{
data[id] += 1;
__store_op((void*)&data[id], OP_READWRITE);
}
__threadfence_system(); // FLUSH INSTRUMENTATION
}
else
{
for(int i = 0; i < num_repeats; ++i )
{
data[id] += 1;
}
}
}
__global__ void benchmark_saxpy(const int num_repeats, bool enabled, int N, float a, float *x, float *y)
{
if(enabled)
{
for(int j = 0; j < num_repeats; ++j )
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
float vx = x[i];
__store_op(&x[i], OP_READ);
float vy = y[i];
__store_op(&y[i], OP_READ);
y[i] = a*vx + vy;
__store_op(&y[i], OP_WRITE);
}
}
__threadfence_system(); // FLUSH INSTRUMENTATION
}
else
{
for(int j = 0; j < num_repeats; ++j )
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
float vx = x[i];
float vy = y[i];
y[i] = a*vx + vy;
// if(i == 0) printf("%f = %f*%f+%f\n", y[i], a, vx, vy);
}
}
}
}
__global__ static void benchmark_timedReduction(const int num_repeats, bool enabled, const float *input, float *output, clock_t *timer)
{
// __shared__ float shared[2 * blockDim.x];
extern __shared__ float shared[];
if(enabled)
{
for(int j = 0; j < num_repeats; ++ j)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0)
{
timer[bid] = clock();
__store_op(&timer[bid], OP_WRITE); // the clock() store above is a write; OP_READ here looked like a slip (the matching block below logs OP_WRITE)
}
// Copy input.
shared[tid] = input[tid];
__store_op(&input[tid], OP_READ);
__store_op(&shared[tid], OP_WRITE);
shared[tid + blockDim.x] = input[tid + blockDim.x];
__store_op(&input[tid + blockDim.x], OP_READ);
__store_op(&shared[tid + blockDim.x], OP_WRITE);
// Perform reduction to find minimum.
for (int d = blockDim.x; d > 0; d /= 2)
{
__syncthreads();
if (tid < d)
{
float f0 = shared[tid];
__store_op(&shared[tid], OP_READ);
float f1 = shared[tid + d];
__store_op(&shared[tid + d], OP_READ);
if (f1 < f0)
{
shared[tid] = f1;
__store_op(&shared[tid], OP_WRITE);
}
}
}
// Write result.
if (tid == 0)
{
output[bid] = shared[0];
__store_op(&shared[0], OP_READ);
__store_op(&output[bid], OP_WRITE);
}
__syncthreads();
if (tid == 0)
{
timer[bid+gridDim.x] = clock();
__store_op(&timer[bid+gridDim.x], OP_WRITE);
}
}
}
else
{
for(int j = 0; j < num_repeats; ++ j)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0) timer[bid] = clock();
// Copy input.
shared[tid] = input[tid];
shared[tid + blockDim.x] = input[tid + blockDim.x];
// Perform reduction to find minimum.
for (int d = blockDim.x; d > 0; d /= 2)
{
__syncthreads();
if (tid < d)
{
float f0 = shared[tid];
float f1 = shared[tid + d];
if (f1 < f0)
{
shared[tid] = f1;
}
}
}
// Write result.
if (tid == 0) output[bid] = shared[0];
__syncthreads();
if (tid == 0) timer[bid+gridDim.x] = clock();
}
}
}
/*static long now()
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
long out;
out = ts.tv_nsec + ts.tv_sec * 1000000000;
return out;
}*/
int main(int argc, char* argv[])
{
if(argc != 6)
{
fprintf(stderr, "%s BENCHMARK INSTRUMENTED THREADS BLOCKS REPEATS\n", argv[0]);
fprintf(stderr, " BENCHMARK:\n");
fprintf(stderr, " 1 - Mem write\n");
fprintf(stderr, " 2 - Saxpy\n");
fprintf(stderr, " 3 - Clock (cuda sample)\n");
fprintf(stderr, " INSTRUMENTED:\n");
fprintf(stderr, " 0 - false\n");
fprintf(stderr, " 1 - true\n");
fprintf(stderr, " THREADS: threads per threadblock\n");
fprintf(stderr, " BLOCKS: threadblocks\n");
fprintf(stderr, " REPEATS: number of repeats\n");
return 1;
}
const int TEST_ID = atoi(argv[1]);
const int ENABLED = atoi(argv[2]);
const int NUM_THREADS = atoi(argv[3]);
const int NUM_BLOCKS = atoi(argv[4]);
const int NUM_TOTAL = NUM_THREADS * NUM_BLOCKS;
const int NUM_REPEATS = atoi(argv[5]);
printf("@TestId:\t%i\n", TEST_ID);
printf("@Enabled:\t%i\n", ENABLED);
printf("@Blocks:\t%i\n", NUM_BLOCKS);
printf("@Threads:\t%i\n", NUM_THREADS);
printf("@Total:\t%i\n", NUM_TOTAL);
printf("@Repeats:\t%i\n", NUM_REPEATS);
int enabled = ENABLED > 0;
hipEvent_t start, stop;
std::auto_ptr<Impl> impl;
switch(TEST_ID)
{
case 1:
{
const long expected = NUM_REPEATS * NUM_TOTAL / WARP_SIZE;
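// Added note (assumption): the division by WARP_SIZE in the expression above suggests
// __store_op records one instrumentation event per warp rather than per thread; the
// actual logging granularity is defined in the hooklib/devlogger headers.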
impl.reset(new Impl((enabled) ? expected : 0));
// Launch the kernel.
unsigned int* dev_data;
checkCudaErrors(hipMalloc(&dev_data, sizeof(unsigned int) * NUM_TOTAL));
checkCudaErrors(hipMemset(dev_data, 0, sizeof(unsigned int) * NUM_TOTAL));
unsigned int* host_data = (unsigned int*)malloc(sizeof(unsigned int) * NUM_TOTAL);
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
hipEventRecord(start);
hipLaunchKernelGGL(( benchmark_memwrite), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, NUM_REPEATS, enabled, dev_data);
hipEventRecord(stop);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(host_data, dev_data, sizeof(unsigned int) * NUM_TOTAL, hipMemcpyDeviceToHost));
for(int i = 0; i < NUM_TOTAL; ++ i)
{
if(host_data[i] != NUM_REPEATS)
{
fprintf(stderr, "Error at index: %i\n", i);
exit(-1);
}
}
}
break;
case 2:
{
const long expected = 3 * NUM_REPEATS * NUM_TOTAL / WARP_SIZE;
impl.reset(new Impl((enabled) ? expected : 0));
int N = NUM_TOTAL;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
float bx = 1.0f, by = 2.0f;
for (int i = 0; i < N; i++) {
x[i] = bx;
y[i] = by;
}
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
float a = 2.0f;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( benchmark_saxpy), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, NUM_REPEATS, enabled, N, a, d_x, d_y);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipDeviceSynchronize());
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
checkCudaErrors(hipDeviceSynchronize());
float value = by;
for(int i = 0; i < NUM_REPEATS; ++ i)
value = a * bx + value;
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-value));
if(maxError > 0.1)
printf("@Error: true\n");
}
break;
case 3: // cuda sample '0_Simple/clock'
{
const long expected = NUM_REPEATS * NUM_TOTAL / WARP_SIZE;
impl.reset(new Impl((enabled) ? expected : 0));
// Launch the kernel.
float *dinput = NULL;
float *doutput = NULL;
clock_t *dtimer = NULL;
clock_t timer[NUM_BLOCKS * 2];
float input[NUM_THREADS * 2];
for (int i = 0; i < NUM_THREADS * 2; i++)
{
input[i] = (float)i;
}
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipMalloc((void **)&dinput, sizeof(float) * NUM_THREADS * 2));
checkCudaErrors(hipMalloc((void **)&doutput, sizeof(float) * NUM_BLOCKS));
checkCudaErrors(hipMalloc((void **)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2));
checkCudaErrors(hipMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, hipMemcpyHostToDevice));
hipEventRecord(start);
hipLaunchKernelGGL(( benchmark_timedReduction), dim3(NUM_BLOCKS), dim3(NUM_THREADS), sizeof(float) * 2 *NUM_THREADS, 0, NUM_REPEATS, enabled, dinput, doutput, dtimer);
hipEventRecord(stop);
checkCudaErrors(hipMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(dinput));
checkCudaErrors(hipFree(doutput));
checkCudaErrors(hipFree(dtimer));
// Compute the difference between the last block end and the first block start.
clock_t minStart = timer[0];
clock_t maxEnd = timer[NUM_BLOCKS];
for (int i = 1; i < NUM_BLOCKS; i++)
{
minStart = timer[i] < minStart ? timer[i] : minStart;
maxEnd = timer[NUM_BLOCKS+i] > maxEnd ? timer[NUM_BLOCKS+i] : maxEnd;
}
printf("Total clocks = %Lf\n", (long double)(maxEnd - minStart));
}
break;
default:
fprintf(stderr, "Unknown test case: %i\n", TEST_ID);
return 1; // the timing events are never created for an unknown test, so exit before the synchronize/elapsed calls below
}
hipEventSynchronize(stop);
float ms = 0;
hipEventElapsedTime(&ms, start, stop);
printf("@Elapsed:\t%f\n", ms);
return 0;
}
| bae5082d884b37aee6a421cd5e34617aae1fedad.cu | #include <iostream>
#include <unistd.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <memory>
#include "debug.h"
#include "hooklib.h"
#include "protocol.hpp"
#include "slimfast.hpp"
#include "consumer.hpp"
#include "impl.hpp"
#include "devlogger.hpp"
__global__ void benchmark_memwrite(const int num_repeats, bool enabled, volatile unsigned int* data)
{
int id = ((blockDim.x * blockIdx.x) + threadIdx.x);
if(enabled)
{
for(int i = 0; i < num_repeats; ++i )
{
data[id] += 1;
__store_op((void*)&data[id], OP_READWRITE);
}
__threadfence_system(); // FLUSH INSTRUMENTATION
}
else
{
for(int i = 0; i < num_repeats; ++i )
{
data[id] += 1;
}
}
}
__global__ void benchmark_saxpy(const int num_repeats, bool enabled, int N, float a, float *x, float *y)
{
if(enabled)
{
for(int j = 0; j < num_repeats; ++j )
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
float vx = x[i];
__store_op(&x[i], OP_READ);
float vy = y[i];
__store_op(&y[i], OP_READ);
y[i] = a*vx + vy;
__store_op(&y[i], OP_WRITE);
}
}
__threadfence_system(); // FLUSH INSTRUMENTATION
}
else
{
for(int j = 0; j < num_repeats; ++j )
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < N)
{
float vx = x[i];
float vy = y[i];
y[i] = a*vx + vy;
// if(i == 0) printf("%f = %f*%f+%f\n", y[i], a, vx, vy);
}
}
}
}
__global__ static void benchmark_timedReduction(const int num_repeats, bool enabled, const float *input, float *output, clock_t *timer)
{
// __shared__ float shared[2 * blockDim.x];
extern __shared__ float shared[];
if(enabled)
{
for(int j = 0; j < num_repeats; ++ j)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0)
{
timer[bid] = clock();
__store_op(&timer[bid], OP_WRITE); // the clock() store above is a write; OP_READ here looked like a slip (the matching block below logs OP_WRITE)
}
// Copy input.
shared[tid] = input[tid];
__store_op(&input[tid], OP_READ);
__store_op(&shared[tid], OP_WRITE);
shared[tid + blockDim.x] = input[tid + blockDim.x];
__store_op(&input[tid + blockDim.x], OP_READ);
__store_op(&shared[tid + blockDim.x], OP_WRITE);
// Perform reduction to find minimum.
for (int d = blockDim.x; d > 0; d /= 2)
{
__syncthreads();
if (tid < d)
{
float f0 = shared[tid];
__store_op(&shared[tid], OP_READ);
float f1 = shared[tid + d];
__store_op(&shared[tid + d], OP_READ);
if (f1 < f0)
{
shared[tid] = f1;
__store_op(&shared[tid], OP_WRITE);
}
}
}
// Write result.
if (tid == 0)
{
output[bid] = shared[0];
__store_op(&shared[0], OP_READ);
__store_op(&output[bid], OP_WRITE);
}
__syncthreads();
if (tid == 0)
{
timer[bid+gridDim.x] = clock();
__store_op(&timer[bid+gridDim.x], OP_WRITE);
}
}
}
else
{
for(int j = 0; j < num_repeats; ++ j)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0) timer[bid] = clock();
// Copy input.
shared[tid] = input[tid];
shared[tid + blockDim.x] = input[tid + blockDim.x];
// Perform reduction to find minimum.
for (int d = blockDim.x; d > 0; d /= 2)
{
__syncthreads();
if (tid < d)
{
float f0 = shared[tid];
float f1 = shared[tid + d];
if (f1 < f0)
{
shared[tid] = f1;
}
}
}
// Write result.
if (tid == 0) output[bid] = shared[0];
__syncthreads();
if (tid == 0) timer[bid+gridDim.x] = clock();
}
}
}
/*static long now()
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
long out;
out = ts.tv_nsec + ts.tv_sec * 1000000000;
return out;
}*/
int main(int argc, char* argv[])
{
if(argc != 6)
{
fprintf(stderr, "%s BENCHMARK INSTRUMENTED THREADS BLOCKS REPEATS\n", argv[0]);
fprintf(stderr, " BENCHMARK:\n");
fprintf(stderr, " 1 - Mem write\n");
fprintf(stderr, " 2 - Saxpy\n");
fprintf(stderr, " 3 - Clock (cuda sample)\n");
fprintf(stderr, " INSTRUMENTED:\n");
fprintf(stderr, " 0 - false\n");
fprintf(stderr, " 1 - true\n");
fprintf(stderr, " THREADS: threads per threadblock\n");
fprintf(stderr, " BLOCKS: threadblocks\n");
fprintf(stderr, " REPEATS: number of repeats\n");
return 1;
}
const int TEST_ID = atoi(argv[1]);
const int ENABLED = atoi(argv[2]);
const int NUM_THREADS = atoi(argv[3]);
const int NUM_BLOCKS = atoi(argv[4]);
const int NUM_TOTAL = NUM_THREADS * NUM_BLOCKS;
const int NUM_REPEATS = atoi(argv[5]);
printf("@TestId:\t%i\n", TEST_ID);
printf("@Enabled:\t%i\n", ENABLED);
printf("@Blocks:\t%i\n", NUM_BLOCKS);
printf("@Threads:\t%i\n", NUM_THREADS);
printf("@Total:\t%i\n", NUM_TOTAL);
printf("@Repeats:\t%i\n", NUM_REPEATS);
int enabled = ENABLED > 0;
cudaEvent_t start, stop;
std::auto_ptr<Impl> impl;
switch(TEST_ID)
{
case 1:
{
const long expected = NUM_REPEATS * NUM_TOTAL / WARP_SIZE;
impl.reset(new Impl((enabled) ? expected : 0));
// Launch the kernel.
unsigned int* dev_data;
checkCudaErrors(cudaMalloc(&dev_data, sizeof(unsigned int) * NUM_TOTAL));
checkCudaErrors(cudaMemset(dev_data, 0, sizeof(unsigned int) * NUM_TOTAL));
unsigned int* host_data = (unsigned int*)malloc(sizeof(unsigned int) * NUM_TOTAL);
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
cudaEventRecord(start);
benchmark_memwrite<<<NUM_BLOCKS, NUM_THREADS>>>(NUM_REPEATS, enabled, dev_data);
cudaEventRecord(stop);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(host_data, dev_data, sizeof(unsigned int) * NUM_TOTAL, cudaMemcpyDeviceToHost));
for(int i = 0; i < NUM_TOTAL; ++ i)
{
if(host_data[i] != NUM_REPEATS)
{
fprintf(stderr, "Error at index: %i\n", i);
exit(-1);
}
}
}
break;
case 2:
{
const long expected = 3 * NUM_REPEATS * NUM_TOTAL / WARP_SIZE;
impl.reset(new Impl((enabled) ? expected : 0));
int N = NUM_TOTAL;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
float bx = 1.0f, by = 2.0f;
for (int i = 0; i < N; i++) {
x[i] = bx;
y[i] = by;
}
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
float a = 2.0f;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
benchmark_saxpy<<<NUM_BLOCKS, NUM_THREADS>>>(NUM_REPEATS, enabled, N, a, d_x, d_y);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaDeviceSynchronize());
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
checkCudaErrors(cudaDeviceSynchronize());
float value = by;
for(int i = 0; i < NUM_REPEATS; ++ i)
value = a * bx + value;
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-value));
if(maxError > 0.1)
printf("@Error: true\n");
}
break;
case 3: // cuda sample '0_Simple/clock'
{
const long expected = NUM_REPEATS * NUM_TOTAL / WARP_SIZE;
impl.reset(new Impl((enabled) ? expected : 0));
// Launch the kernel.
float *dinput = NULL;
float *doutput = NULL;
clock_t *dtimer = NULL;
clock_t timer[NUM_BLOCKS * 2];
float input[NUM_THREADS * 2];
for (int i = 0; i < NUM_THREADS * 2; i++)
{
input[i] = (float)i;
}
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaMalloc((void **)&dinput, sizeof(float) * NUM_THREADS * 2));
checkCudaErrors(cudaMalloc((void **)&doutput, sizeof(float) * NUM_BLOCKS));
checkCudaErrors(cudaMalloc((void **)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2));
checkCudaErrors(cudaMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, cudaMemcpyHostToDevice));
cudaEventRecord(start);
benchmark_timedReduction<<<NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 *NUM_THREADS>>>(NUM_REPEATS, enabled, dinput, doutput, dtimer);
cudaEventRecord(stop);
checkCudaErrors(cudaMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(dinput));
checkCudaErrors(cudaFree(doutput));
checkCudaErrors(cudaFree(dtimer));
// Compute the difference between the last block end and the first block start.
clock_t minStart = timer[0];
clock_t maxEnd = timer[NUM_BLOCKS];
for (int i = 1; i < NUM_BLOCKS; i++)
{
minStart = timer[i] < minStart ? timer[i] : minStart;
maxEnd = timer[NUM_BLOCKS+i] > maxEnd ? timer[NUM_BLOCKS+i] : maxEnd;
}
printf("Total clocks = %Lf\n", (long double)(maxEnd - minStart));
}
break;
default:
fprintf(stderr, "Unknown test case: %i\n", TEST_ID);
return 1; // the timing events are never created for an unknown test, so exit before the synchronize/elapsed calls below
}
cudaEventSynchronize(stop);
float ms = 0;
cudaEventElapsedTime(&ms, start, stop);
printf("@Elapsed:\t%f\n", ms);
return 0;
}
|
7fb03110583bec5ffaf055fd9c55d0617b2c93e5.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "device_helpers_hip.cuh"
namespace xgboost {
// the handler to call instead of hipSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
// wrapper over access with useful methods
class Permissions {
GPUAccess access_;
explicit Permissions(GPUAccess access) : access_(access) {}
public:
Permissions() : access_(GPUAccess::kNone) {}
explicit Permissions(bool perm)
: access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {}
bool CanRead() const { return access_ >= kRead; }
bool CanWrite() const { return access_ == kWrite; }
bool CanAccess(GPUAccess access) const { return access_ >= access; }
void Grant(GPUAccess access) { access_ = ::max(access_, access); }
void DenyComplementary(GPUAccess compl_access) {
access_ = ::min(access_, GPUAccess::kWrite - compl_access);
}
Permissions Complementary() const {
return Permissions(GPUAccess::kWrite - access_);
}
};
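// Added note: GPUAccess forms the ordered set kNone < kRead < kWrite (declared in
// host_device_vector.h). Assuming the usual kNone=0, kRead=1, kWrite=2 encoding,
// Complementary() maps kNone -> kWrite, kRead -> kRead and kWrite -> kNone, which is
// how granting write access on one side revokes all access on the other.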
template <typename T>
struct HostDeviceVectorImpl {
struct DeviceShard {
DeviceShard()
: proper_size_(0), device_(-1), start_(0), perm_d_(false),
cached_size_(~0), vec_(nullptr) {}
void Init(HostDeviceVectorImpl<T>* vec, int device) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = device;
// TODO(rongou): remove pointer dereference once CUDA 10.1 is fixed.
LazyResize((*vec_).Size());
perm_d_ = vec_->perm_h_.Complementary();
}
void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = other.device_;
cached_size_ = other.cached_size_;
start_ = other.start_;
proper_size_ = other.proper_size_;
SetDevice();
data_.resize(other.data_.size());
perm_d_ = other.perm_d_;
}
void ScatterFrom(const T* begin) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_.data().get(), begin + start_,
data_.size() * sizeof(T), hipMemcpyDefault));
}
void GatherTo(thrust::device_ptr<T> begin) {
LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(hipMemcpyAsync(begin.get() + start_, data_.data().get(),
proper_size_ * sizeof(T), hipMemcpyDefault));
}
void Fill(T v) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
thrust::fill(data_.begin(), data_.end(), v);
}
void Copy(DeviceShard* other) {
// TODO(canonizer): avoid full copy of host data for this (but not for other)
LazySyncDevice(GPUAccess::kWrite);
other->LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_.data().get(), other->data_.data().get(),
data_.size() * sizeof(T), hipMemcpyDefault));
}
void LazySyncHost(GPUAccess access) {
SetDevice();
dh::safe_cuda(hipMemcpy(vec_->data_h_.data() + start_,
data_.data().get(), proper_size_ * sizeof(T),
hipMemcpyDeviceToHost));
perm_d_.DenyComplementary(access);
}
void LazyResize(size_t new_size) {
if (new_size == cached_size_) { return; }
// resize is required
int ndevices = vec_->distribution_.devices_.Size();
int device_index = vec_->distribution_.devices_.Index(device_);
start_ = vec_->distribution_.ShardStart(new_size, device_index);
proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index);
// The size on this device.
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index);
SetDevice();
data_.resize(size_d);
cached_size_ = new_size;
}
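// Added example (illustrative): with the default block distribution, a vector of 10
// elements sharded across 2 devices would give shard 0 start_ = 0 and shard 1
// start_ = 5, each with proper_size_ = 5. ShardSize can exceed ShardProperSize for
// overlapping distributions, and only proper_size_ elements are copied back to the host.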
void LazySyncDevice(GPUAccess access) {
if (perm_d_.CanAccess(access)) { return; }
if (perm_d_.CanRead()) {
// deny read to the host
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
return;
}
// data is on the host
size_t size_h = vec_->data_h_.size();
LazyResize(size_h);
SetDevice();
dh::safe_cuda(
hipMemcpy(data_.data().get(), vec_->data_h_.data() + start_,
data_.size() * sizeof(T), hipMemcpyHostToDevice));
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
vec_->size_d_ = size_h;
}
void SetDevice() {
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(hipSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
int device_;
thrust::device_vector<T> data_;
// cached vector size
size_t cached_size_;
size_t start_;
// size of the portion to copy back to the host
size_t proper_size_;
Permissions perm_d_;
HostDeviceVectorImpl<T>* vec_;
};
HostDeviceVectorImpl(size_t size, T v, GPUDistribution distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = size;
InitShards();
Fill(v);
} else {
data_h_.resize(size, v);
}
}
// required, as a new std::mutex has to be created
HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other)
: data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_),
distribution_(other.distribution_), mutex_() {
shards_.resize(other.shards_.size());
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, other.shards_.at(i));
});
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, GPUDistribution distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = init.size();
InitShards();
Copy(init);
} else {
data_h_ = init;
}
}
void InitShards() {
int ndevices = distribution_.devices_.Size();
shards_.resize(ndevices);
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, distribution_.devices_.DeviceId(i));
});
}
size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; }
GPUSet Devices() const { return distribution_.devices_; }
const GPUDistribution& Distribution() const { return distribution_; }
T* DevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
}
const T* ConstDevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
}
common::Span<T> DeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return {shards_.at(devices.Index(device)).data_.data().get(),
static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
}
common::Span<const T> ConstDeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return {shards_.at(devices.Index(device)).data_.data().get(),
static_cast<typename common::Span<const T>::index_type>(DeviceSize(device))};
}
size_t DeviceSize(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).data_.size();
}
size_t DeviceStart(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).start_;
}
thrust::device_ptr<T> tbegin(int device) { // NOLINT
return thrust::device_ptr<T>(DevicePointer(device));
}
thrust::device_ptr<const T> tcbegin(int device) { // NOLINT
return thrust::device_ptr<const T>(ConstDevicePointer(device));
}
thrust::device_ptr<T> tend(int device) { // NOLINT
return tbegin(device) + DeviceSize(device);
}
thrust::device_ptr<const T> tcend(int device) { // NOLINT
return tcbegin(device) + DeviceSize(device);
}
void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(hipMemcpy(data_h_.data(), begin.get(),
(end - begin) * sizeof(T),
hipMemcpyDeviceToHost));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(begin.get());
});
}
}
void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(hipMemcpy(begin.get(), data_h_.data(),
data_h_.size() * sizeof(T),
hipMemcpyHostToDevice));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); });
}
}
void Fill(T v) {
if (perm_h_.CanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); });
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
// Data is on device;
if (distribution_ != other->distribution_) {
distribution_ = GPUDistribution();
Reshard(other->Distribution());
size_d_ = other->size_d_;
}
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Copy(&other->shards_.at(i));
});
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.data());
});
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.begin());
});
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kWrite);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void Reshard(const GPUDistribution& distribution) {
if (distribution_ == distribution) { return; }
CHECK(distribution_.IsEmpty() || distribution.IsEmpty());
if (distribution.IsEmpty()) {
LazySyncHost(GPUAccess::kWrite);
}
distribution_ = distribution;
InitShards();
}
void Reshard(GPUSet new_devices) {
if (distribution_.Devices() == new_devices) { return; }
Reshard(GPUDistribution::Block(new_devices));
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (distribution_.IsFixedSize()) {
CHECK_EQ(new_size, distribution_.offsets_.back());
}
if (Size() == 0 && !distribution_.IsEmpty()) {
// fast on-device resize
perm_h_ = Permissions(false);
size_d_ = new_size;
InitShards();
Fill(v);
} else {
// resize on host
LazySyncHost(GPUAccess::kWrite);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (perm_h_.CanAccess(access)) { return; }
if (perm_h_.CanRead()) {
// data is present, just need to deny access to the device
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.perm_d_.DenyComplementary(access);
});
perm_h_.Grant(access);
return;
}
if (data_h_.size() != size_d_) { data_h_.resize(size_d_); }
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.LazySyncHost(access);
});
perm_h_.Grant(access);
}
void LazySyncDevice(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
CHECK(devices.Contains(device));
shards_.at(devices.Index(device)).LazySyncDevice(access);
}
bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); }
bool DeviceCanAccess(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
if (!devices.Contains(device)) { return false; }
return shards_.at(devices.Index(device)).perm_d_.CanAccess(access);
}
std::vector<T> data_h_;
Permissions perm_h_;
// the total size of the data stored on the devices
size_t size_d_;
GPUDistribution distribution_;
// protects size_d_ and perm_h_ when updated from multiple threads
std::mutex mutex_;
std::vector<DeviceShard> shards_;
};
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(size_t size, T v, GPUDistribution distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(size, v, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(std::initializer_list<T> init, GPUDistribution distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(const std::vector<T>& init, GPUDistribution distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=
(const HostDeviceVector<T>& other) {
if (this == &other) { return *this; }
delete impl_;
impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); }
template <typename T>
const GPUDistribution& HostDeviceVector<T>::Distribution() const {
return impl_->Distribution();
}
template <typename T>
T* HostDeviceVector<T>::DevicePointer(int device) {
return impl_->DevicePointer(device);
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer(int device) const {
return impl_->ConstDevicePointer(device);
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) {
return impl_->DeviceSpan(device);
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const {
return impl_->ConstDeviceSpan(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceStart(int device) const {
return impl_->DeviceStart(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceSize(int device) const {
return impl_->DeviceSize(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT
return impl_->tbegin(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT
return impl_->tcbegin(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT
return impl_->tend(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT
return impl_->tcend(device);
}
template <typename T>
void HostDeviceVector<T>::ScatterFrom
(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
impl_->ScatterFrom(begin, end);
}
template <typename T>
void HostDeviceVector<T>::GatherTo
(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const {
impl_->GatherTo(begin, end);
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const {
return impl_->HostCanAccess(access);
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const {
return impl_->DeviceCanAccess(device, access);
}
template <typename T>
void HostDeviceVector<T>::Reshard(GPUSet new_devices) const {
impl_->Reshard(new_devices);
}
template <typename T>
void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const {
impl_->Reshard(distribution);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace xgboost
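// Illustrative usage sketch (added; not part of the original file). It follows the
// API above; GPUSet::Range is assumed to come from the accompanying headers. Kept as
// a comment because this translation unit only provides explicit instantiations.
//
// xgboost::HostDeviceVector<float> vec(
//     100, 0.0f, xgboost::GPUDistribution::Block(xgboost::GPUSet::Range(0, 1)));
// vec.Fill(1.0f);                                       // fills the device shard(s)
// const std::vector<float>& h = vec.ConstHostVector();  // lazily syncs back to the host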
| 7fb03110583bec5ffaf055fd9c55d0617b2c93e5.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "./device_helpers.cuh"
namespace xgboost {
// the handler to call instead of cudaSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
// wrapper over access with useful methods
class Permissions {
GPUAccess access_;
explicit Permissions(GPUAccess access) : access_(access) {}
public:
Permissions() : access_(GPUAccess::kNone) {}
explicit Permissions(bool perm)
: access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {}
bool CanRead() const { return access_ >= kRead; }
bool CanWrite() const { return access_ == kWrite; }
bool CanAccess(GPUAccess access) const { return access_ >= access; }
void Grant(GPUAccess access) { access_ = std::max(access_, access); }
void DenyComplementary(GPUAccess compl_access) {
access_ = std::min(access_, GPUAccess::kWrite - compl_access);
}
Permissions Complementary() const {
return Permissions(GPUAccess::kWrite - access_);
}
};
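// Illustrative semantics, assuming GPUAccess orders kNone < kRead < kWrite as used above:
//   Permissions host(true);          // granted kWrite
//   host.DenyComplementary(kRead);   // the device side keeps read access,
//                                    // so the host drops to kWrite - kRead == kRead
//   host.CanWrite();                 // now false until Grant(kWrite) is called again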
template <typename T>
struct HostDeviceVectorImpl {
struct DeviceShard {
DeviceShard()
: proper_size_(0), device_(-1), start_(0), perm_d_(false),
cached_size_(~0), vec_(nullptr) {}
void Init(HostDeviceVectorImpl<T>* vec, int device) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = device;
// TODO(rongou): remove pointer dereference once CUDA 10.1 is fixed.
LazyResize((*vec_).Size());
perm_d_ = vec_->perm_h_.Complementary();
}
void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = other.device_;
cached_size_ = other.cached_size_;
start_ = other.start_;
proper_size_ = other.proper_size_;
SetDevice();
data_.resize(other.data_.size());
perm_d_ = other.perm_d_;
}
void ScatterFrom(const T* begin) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), begin + start_,
data_.size() * sizeof(T), cudaMemcpyDefault));
}
void GatherTo(thrust::device_ptr<T> begin) {
LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(begin.get() + start_, data_.data().get(),
proper_size_ * sizeof(T), cudaMemcpyDefault));
}
void Fill(T v) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
thrust::fill(data_.begin(), data_.end(), v);
}
void Copy(DeviceShard* other) {
// TODO(canonizer): avoid full copy of host data for this (but not for other)
LazySyncDevice(GPUAccess::kWrite);
other->LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), other->data_.data().get(),
data_.size() * sizeof(T), cudaMemcpyDefault));
}
void LazySyncHost(GPUAccess access) {
SetDevice();
dh::safe_cuda(cudaMemcpy(vec_->data_h_.data() + start_,
data_.data().get(), proper_size_ * sizeof(T),
cudaMemcpyDeviceToHost));
perm_d_.DenyComplementary(access);
}
void LazyResize(size_t new_size) {
if (new_size == cached_size_) { return; }
// resize is required
int ndevices = vec_->distribution_.devices_.Size();
int device_index = vec_->distribution_.devices_.Index(device_);
start_ = vec_->distribution_.ShardStart(new_size, device_index);
proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index);
// The size on this device.
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index);
SetDevice();
data_.resize(size_d);
cached_size_ = new_size;
}
void LazySyncDevice(GPUAccess access) {
if (perm_d_.CanAccess(access)) { return; }
if (perm_d_.CanRead()) {
// deny read to the host
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
return;
}
// data is on the host
size_t size_h = vec_->data_h_.size();
LazyResize(size_h);
SetDevice();
dh::safe_cuda(
cudaMemcpy(data_.data().get(), vec_->data_h_.data() + start_,
data_.size() * sizeof(T), cudaMemcpyHostToDevice));
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
vec_->size_d_ = size_h;
}
void SetDevice() {
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(cudaSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
int device_;
thrust::device_vector<T> data_;
// cached vector size
size_t cached_size_;
size_t start_;
// size of the portion to copy back to the host
size_t proper_size_;
Permissions perm_d_;
HostDeviceVectorImpl<T>* vec_;
};
HostDeviceVectorImpl(size_t size, T v, GPUDistribution distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = size;
InitShards();
Fill(v);
} else {
data_h_.resize(size, v);
}
}
// required, as a new std::mutex has to be created
HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other)
: data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_),
distribution_(other.distribution_), mutex_() {
shards_.resize(other.shards_.size());
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, other.shards_.at(i));
});
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, GPUDistribution distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = init.size();
InitShards();
Copy(init);
} else {
data_h_ = init;
}
}
void InitShards() {
int ndevices = distribution_.devices_.Size();
shards_.resize(ndevices);
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, distribution_.devices_.DeviceId(i));
});
}
size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; }
GPUSet Devices() const { return distribution_.devices_; }
const GPUDistribution& Distribution() const { return distribution_; }
T* DevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
}
const T* ConstDevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
}
common::Span<T> DeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return {shards_.at(devices.Index(device)).data_.data().get(),
static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
}
common::Span<const T> ConstDeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return {shards_.at(devices.Index(device)).data_.data().get(),
static_cast<typename common::Span<const T>::index_type>(DeviceSize(device))};
}
size_t DeviceSize(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).data_.size();
}
size_t DeviceStart(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).start_;
}
thrust::device_ptr<T> tbegin(int device) { // NOLINT
return thrust::device_ptr<T>(DevicePointer(device));
}
thrust::device_ptr<const T> tcbegin(int device) { // NOLINT
return thrust::device_ptr<const T>(ConstDevicePointer(device));
}
thrust::device_ptr<T> tend(int device) { // NOLINT
return tbegin(device) + DeviceSize(device);
}
thrust::device_ptr<const T> tcend(int device) { // NOLINT
return tcbegin(device) + DeviceSize(device);
}
void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(cudaMemcpy(data_h_.data(), begin.get(),
(end - begin) * sizeof(T),
cudaMemcpyDeviceToHost));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(begin.get());
});
}
}
void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(cudaMemcpy(begin.get(), data_h_.data(),
data_h_.size() * sizeof(T),
cudaMemcpyHostToDevice));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); });
}
}
void Fill(T v) {
if (perm_h_.CanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); });
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
// Data is on device;
if (distribution_ != other->distribution_) {
distribution_ = GPUDistribution();
Reshard(other->Distribution());
size_d_ = other->size_d_;
}
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Copy(&other->shards_.at(i));
});
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.data());
});
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.begin());
});
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kWrite);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void Reshard(const GPUDistribution& distribution) {
if (distribution_ == distribution) { return; }
CHECK(distribution_.IsEmpty() || distribution.IsEmpty());
if (distribution.IsEmpty()) {
LazySyncHost(GPUAccess::kWrite);
}
distribution_ = distribution;
InitShards();
}
void Reshard(GPUSet new_devices) {
if (distribution_.Devices() == new_devices) { return; }
Reshard(GPUDistribution::Block(new_devices));
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (distribution_.IsFixedSize()) {
CHECK_EQ(new_size, distribution_.offsets_.back());
}
if (Size() == 0 && !distribution_.IsEmpty()) {
// fast on-device resize
perm_h_ = Permissions(false);
size_d_ = new_size;
InitShards();
Fill(v);
} else {
// resize on host
LazySyncHost(GPUAccess::kWrite);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (perm_h_.CanAccess(access)) { return; }
if (perm_h_.CanRead()) {
// data is present, just need to deny access to the device
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.perm_d_.DenyComplementary(access);
});
perm_h_.Grant(access);
return;
}
if (data_h_.size() != size_d_) { data_h_.resize(size_d_); }
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.LazySyncHost(access);
});
perm_h_.Grant(access);
}
void LazySyncDevice(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
CHECK(devices.Contains(device));
shards_.at(devices.Index(device)).LazySyncDevice(access);
}
bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); }
bool DeviceCanAccess(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
if (!devices.Contains(device)) { return false; }
return shards_.at(devices.Index(device)).perm_d_.CanAccess(access);
}
std::vector<T> data_h_;
Permissions perm_h_;
// the total size of the data stored on the devices
size_t size_d_;
GPUDistribution distribution_;
// protects size_d_ and perm_h_ when updated from multiple threads
std::mutex mutex_;
std::vector<DeviceShard> shards_;
};
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(size_t size, T v, GPUDistribution distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(size, v, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(std::initializer_list<T> init, GPUDistribution distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(const std::vector<T>& init, GPUDistribution distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=
(const HostDeviceVector<T>& other) {
if (this == &other) { return *this; }
delete impl_;
impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); }
template <typename T>
const GPUDistribution& HostDeviceVector<T>::Distribution() const {
return impl_->Distribution();
}
template <typename T>
T* HostDeviceVector<T>::DevicePointer(int device) {
return impl_->DevicePointer(device);
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer(int device) const {
return impl_->ConstDevicePointer(device);
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) {
return impl_->DeviceSpan(device);
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const {
return impl_->ConstDeviceSpan(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceStart(int device) const {
return impl_->DeviceStart(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceSize(int device) const {
return impl_->DeviceSize(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT
return impl_->tbegin(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT
return impl_->tcbegin(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT
return impl_->tend(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT
return impl_->tcend(device);
}
template <typename T>
void HostDeviceVector<T>::ScatterFrom
(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
impl_->ScatterFrom(begin, end);
}
template <typename T>
void HostDeviceVector<T>::GatherTo
(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const {
impl_->GatherTo(begin, end);
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const {
return impl_->HostCanAccess(access);
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const {
return impl_->DeviceCanAccess(device, access);
}
template <typename T>
void HostDeviceVector<T>::Reshard(GPUSet new_devices) const {
impl_->Reshard(new_devices);
}
template <typename T>
void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const {
impl_->Reshard(distribution);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace xgboost
|
caba75c125065fde26a23a1d02c5928318462116.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef __cplusplus
extern "C" {
#endif
//===============================================================================================================================================================================================================200
// SET_DEVICE CODE
//===============================================================================================================================================================================================================200
//======================================================================================================================================================150
// INCLUDE/DEFINE
//======================================================================================================================================================150
#include "hip/hip_runtime.h" // (in library path specified to compiler)
//======================================================================================================================================================150
// FUNCTIONS
//======================================================================================================================================================150
//====================================================================================================100
// SET DEVICE
//====================================================================================================100
void setdevice(void){
// variables
int num_devices;
int device;
// work
hipGetDeviceCount(&num_devices);
if (num_devices > 1) {
// variables
int max_multiprocessors;
int max_device;
hipDeviceProp_t properties;
// initialize variables
max_multiprocessors = 0;
max_device = 0;
for (device = 0; device < num_devices; device++) {
hipGetDeviceProperties(&properties, device);
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
}
}
hipSetDevice(max_device);
}
}
//====================================================================================================100
// GET LAST ERROR
//====================================================================================================100
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
// fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
printf("Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
fflush(NULL);
exit(EXIT_FAILURE);
}
}
//===============================================================================================================================================================================================================200
// END
//===============================================================================================================================================================================================================200
#ifdef __cplusplus
}
#endif
| caba75c125065fde26a23a1d02c5928318462116.cu | #ifdef __cplusplus
extern "C" {
#endif
//===============================================================================================================================================================================================================200
// SET_DEVICE CODE
//===============================================================================================================================================================================================================200
//======================================================================================================================================================150
// INCLUDE/DEFINE
//======================================================================================================================================================150
#include "cuda.h" // (in library path specified to compiler)
//======================================================================================================================================================150
// FUNCTIONS
//======================================================================================================================================================150
//====================================================================================================100
// SET DEVICE
//====================================================================================================100
void setdevice(void){
// variables
int num_devices;
int device;
// work
cudaGetDeviceCount(&num_devices);
if (num_devices > 1) {
// variables
int max_multiprocessors;
int max_device;
cudaDeviceProp properties;
// initialize variables
max_multiprocessors = 0;
max_device = 0;
for (device = 0; device < num_devices; device++) {
cudaGetDeviceProperties(&properties, device);
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
}
}
cudaSetDevice(max_device);
}
}
//====================================================================================================100
// GET LAST ERROR
//====================================================================================================100
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
// fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
printf("Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
fflush(NULL);
exit(EXIT_FAILURE);
}
}
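//	Hedged usage sketch; the kernel name and launch configuration below are
//	hypothetical and not part of this file:
//		setdevice();								// pick the GPU with the most multiprocessors
//		my_kernel<<<grid, threads>>>(d_data);		// any kernel launch
//		checkCUDAError("my_kernel launch");			// abort with a readable message on failure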
//===============================================================================================================================================================================================================200
// END
//===============================================================================================================================================================================================================200
#ifdef __cplusplus
}
#endif
|
87d8c5a86208ee4a2068fe2fc20d5ed686cd3e61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <algorithm>
#include <stdio.h>
#include "commonFunction.h"
#include "TestFunction.h"
#define ARRAY_SIZE 49
#include<cmath>
using namespace std;
__global__ void calculateSimilarity3(float* c, const float *a, const int NA,
const int NB, const int NMax);
hipError_t calculateSimilarityWithCuda3(float* c, const float *a,
const int NA, const int NB, const int NMax, string fileName);
/**
int main()
{
const int NA = 7;
const int NB = 7;
const int NMax = 1;
hipError_t cudaStatus;
float A[NA*NB*NMax] = {4};
float C[NMax];
cudaStatus = calculateSimilarityWithCuda3(C, A,NA,NB, NMax,
"../calculateSimilarityTimeResult/calculateSimilarity3.txt"");
//print out C for correctness checking
printf("C[] array is %.2f\n", C[0]);
testFunction(A, NA, NB, NMax, 1000,10000,1000,
"../calculateSimilarityTimeResult/calculateSimilarity1.txt,
&calculateSimilarityWithCuda3);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
return 0;
}
**/
/**
Algorithm:
(1) Sort the elements of the atom match matrix into order of decreasing similarity
(not necessary because we will need to find max anyway)
(2) Scan the atom match matrix to find the remaining pair of atoms, one from A
and one from B, that has the largest calculated value for S(i,j)
(3) Store the resulting equivalences as a tuple of the form [A(i) <-> B(j); S(i,j)]
(4) Remove A(i) and B(j) from further consideration
(5) Return to step 2 if it is possible to map further atoms in A to atoms in B
input:
    array of float c: output array of NMax similarity scores, each the sum of the NA matched maxima divided by NA
    array of float a: input atom match matrices, NA*NB values per molecule for NMax molecules
const int NA: number of atoms in molecule A
const int NB: number of atoms in each molecule in B
const int NMax: number of molecules in B
output:
void
**/
__global__ void calculateSimilarity3(float* c, const float *a, const int NA,
const int NB, const int NMax){
float* temp = new float[NA*NB];
float total;
int position;
int tid= blockIdx.x*blockDim.x+threadIdx.x;
// Each thread work on comparing 1 molecule of A to 1 molecule of B
// If we have NMax molecule B, we need NMax threads
if (tid < NMax) {
// Copy the appropriate part of a big array into a small one.
for (int q = 0; q<NA*NB; q++) {
temp[q] = a[tid*NA*NB + q];
}
// Initialised each thread's total to 0
total = 0;
//loop through NA atoms of molecule A
for (int k =0;k<NA; k++) {
/**
Step 2: Scan the atom match matrix to find the remaining pair of
atoms, one from A and one from B, that has the largest
calculated value for S(i,j)
**/
// Find the max_element and position of max_element in the array of NA*NB float
position = 0;
float max = temp[0];
for (int t = 0; t<NA*NB; t++) {
if (temp[t] > max) {
max = temp[t];
position=t;
}
}
/**
                Step 3: Store the resulting equivalences as a tuple of the form
                        [A(i) <-> B(j); S(i,j)]
**/
// Sum the max into total
total = total + max;
// Get the position of max_element in 2D array
int a = position/NB; //y axis
int b = position%NB; // x axis
/**
Step 4: Remove A(i) and B(j) from further consideration
**/
// Set all the elements in the same row and column of max_element to 0
// set all elements in the same y axis of max = 0
for (int i =0; i<NB; i++ ) temp[a*NB+i] =0;
// set all elements in the same x axis of max = 0
for (int j =0; j<NA; j++) temp[j*NB+b] =0;
}
        //The similarity score is total/NA
        c[tid] = total /NA;
    }
    // free the per-thread scratch matrix allocated with new[] at the top of the kernel
    delete[] temp;
}
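// Hedged reference implementation (not part of the original project): the same greedy
// matching on the host for a single NA-by-NB atom match matrix, handy for checking the
// kernel's output. The function name is illustrative.
static float greedySimilarityHost(const float* s, const int NA, const int NB)
{
    float* m = new float[NA*NB];
    for (int t = 0; t < NA*NB; t++) m[t] = s[t];     // local copy so rows/columns can be zeroed
    float total = 0;
    for (int k = 0; k < NA; k++) {
        // find the largest remaining S(i,j)
        int best = 0;
        for (int t = 1; t < NA*NB; t++)
            if (m[t] > m[best]) best = t;
        total = total + m[best];
        int row = best / NB;
        int col = best % NB;
        // remove A(row) and B(col) from further consideration
        for (int j = 0; j < NB; j++) m[row*NB + j] = 0;
        for (int i = 0; i < NA; i++) m[i*NB + col] = 0;
    }
    delete[] m;
    return total / NA;                               // same normalisation as the kernel
}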
// Helper function for using CUDA to compute the similarity scores in parallel.
hipError_t calculateSimilarityWithCuda3(float* c, const float *a,
const int NA, const int NB, const int NMax, string fileName)
{
float *dev_a = 0;
float *dev_c = 0;
hipError_t cudaStatus;
hipEvent_t start, stop;
float milliseconds;
cudaStatus = hipEventCreate(&start);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate(& start) failed! in scanWithCuda\n");
goto Error;
}
cudaStatus = hipEventCreate(&stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate(& stop) failed! in scanWithCuda\n");
goto Error;
}
//Start recording time
cudaStatus = hipEventRecord(start);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventRecord(start) failed! in scanWithCuda\n");
goto Error;
}
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
    // Allocate GPU buffers for the two vectors (one input, one output).
cudaStatus = hipMalloc((void**)&dev_c, NMax*sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! for dev_c in scanWithCuda\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, NA*NB*NMax * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! for dev_a in scanWithCuda\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, NA * NB *NMax* sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed for dev_a! in scanWithCuda");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( calculateSimilarity3), dim3(NMax/1024 +1), dim3(1024), 0, 0, dev_c, dev_a, NA, NB, NMax);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s in scanWithCuda\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, NMax*sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed for dev_c! in scanWithCuda`\n");
goto Error;
}
cudaStatus = hipEventRecord(stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventRecord(start) failed! in scanWithCuda\n");
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus);
goto Error;
}
cudaStatus = hipEventElapsedTime(&milliseconds, start, stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventElapsedTime failed! in scanWithCuda\n");
goto Error;
}
printf("elapsed time of scanning matrix of NA = %d, NB = %d, NMax = %d is %.4f milliseconds \n", NA,NB,NMax, milliseconds);
writeResult2File (NA, NB, NMax, milliseconds, "milliseconds", fileName);
Error:
hipFree(dev_c);
hipFree(dev_a);
return cudaStatus;
}
| 87d8c5a86208ee4a2068fe2fc20d5ed686cd3e61.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <algorithm>
#include <stdio.h>
#include "commonFunction.h"
#include "TestFunction.h"
#define ARRAY_SIZE 49
#include<cmath>
using namespace std;
__global__ void calculateSimilarity3(float* c, const float *a, const int NA,
const int NB, const int NMax);
cudaError_t calculateSimilarityWithCuda3(float* c, const float *a,
const int NA, const int NB, const int NMax, string fileName);
/**
int main()
{
const int NA = 7;
const int NB = 7;
const int NMax = 1;
cudaError_t cudaStatus;
float A[NA*NB*NMax] = {4};
float C[NMax];
cudaStatus = calculateSimilarityWithCuda3(C, A,NA,NB, NMax,
"../calculateSimilarityTimeResult/calculateSimilarity3.txt"");
//print out C for correctness checking
printf("C[] array is %.2f\n", C[0]);
testFunction(A, NA, NB, NMax, 1000,10000,1000,
"../calculateSimilarityTimeResult/calculateSimilarity1.txt,
&calculateSimilarityWithCuda3);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
return 0;
}
**/
/**
Algorithm:
(1) Sort the elements of the atom match matrix into order of decreasing similarity
(not necessary because we will need to find max anyway)
(2) Scan the atom match matrix to find the remaining pair of atoms, one from A
and one from B, that has the largest calculated value for S(i,j)
(3) Store the resulting equivalences as a tuple of the form [A(i) <-> B(j); S(i,j)]
(4) Remove A(i) and B(j) from further consideration
(5) Return to step 2 if it is possible to map further atoms in A to atoms in B
input:
    array of float c: output array of NMax similarity scores, each the sum of the NA matched maxima divided by NA
    array of float a: input atom match matrices, NA*NB values per molecule for NMax molecules
const int NA: number of atoms in molecule A
const int NB: number of atoms in each molecule in B
const int NMax: number of molecules in B
output:
void
**/
__global__ void calculateSimilarity3(float* c, const float *a, const int NA,
const int NB, const int NMax){
float* temp = new float[NA*NB];
float total;
int position;
int tid= blockIdx.x*blockDim.x+threadIdx.x;
// Each thread work on comparing 1 molecule of A to 1 molecule of B
// If we have NMax molecule B, we need NMax threads
if (tid < NMax) {
// Copy the appropriate part of a big array into a small one.
for (int q = 0; q<NA*NB; q++) {
temp[q] = a[tid*NA*NB + q];
}
// Initialised each thread's total to 0
total = 0;
//loop through NA atoms of molecule A
for (int k =0;k<NA; k++) {
/**
Step 2: Scan the atom match matrix to find the remaining pair of
atoms, one from A and one from B, that has the largest
calculated value for S(i,j)
**/
// Find the max_element and position of max_element in the array of NA*NB float
position = 0;
float max = temp[0];
for (int t = 0; t<NA*NB; t++) {
if (temp[t] > max) {
max = temp[t];
position=t;
}
}
/**
                Step 3: Store the resulting equivalences as a tuple of the form
                        [A(i) <-> B(j); S(i,j)]
**/
// Sum the max into total
total = total + max;
// Get the position of max_element in 2D array
int a = position/NB; //y axis
int b = position%NB; // x axis
/**
Step 4: Remove A(i) and B(j) from further consideration
**/
// Set all the elements in the same row and column of max_element to 0
// set all elements in the same y axis of max = 0
for (int i =0; i<NB; i++ ) temp[a*NB+i] =0;
// set all elements in the same x axis of max = 0
for (int j =0; j<NA; j++) temp[j*NB+b] =0;
}
        //The similarity score is total/NA
        c[tid] = total /NA;
    }
    // free the per-thread scratch matrix allocated with new[] at the top of the kernel
    delete[] temp;
}
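// Worked example (illustrative): for NA = NB = 2 and the atom match matrix
//     S = [ 0.9  0.4 ]
//         [ 0.8  0.1 ]
// the greedy pass first takes S(0,0) = 0.9, zeroes row 0 and column 0, and is then
// left with S(1,1) = 0.1, so the similarity score is (0.9 + 0.1) / 2 = 0.5.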
// Helper function for using CUDA to compute the similarity scores in parallel.
cudaError_t calculateSimilarityWithCuda3(float* c, const float *a,
const int NA, const int NB, const int NMax, string fileName)
{
float *dev_a = 0;
float *dev_c = 0;
cudaError_t cudaStatus;
cudaEvent_t start, stop;
float milliseconds;
cudaStatus = cudaEventCreate(&start);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate(& start) failed! in scanWithCuda\n");
goto Error;
}
cudaStatus = cudaEventCreate(&stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate(& stop) failed! in scanWithCuda\n");
goto Error;
}
//Start recording time
cudaStatus = cudaEventRecord(start);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventRecord(start) failed! in scanWithCuda\n");
goto Error;
}
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
    // Allocate GPU buffers for the two vectors (one input, one output).
cudaStatus = cudaMalloc((void**)&dev_c, NMax*sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed! for dev_c in scanWithCuda\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, NA*NB*NMax * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed! for dev_a in scanWithCuda\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, NA * NB *NMax* sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed for dev_a! in scanWithCuda");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
calculateSimilarity3<<<NMax/1024 +1, 1024>>>(dev_c, dev_a, NA, NB, NMax);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s in scanWithCuda\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, NMax*sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed for dev_c! in scanWithCuda`\n");
goto Error;
}
cudaStatus = cudaEventRecord(stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventRecord(start) failed! in scanWithCuda\n");
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel! in scanWithCuda\n", cudaStatus);
goto Error;
}
cudaStatus = cudaEventElapsedTime(&milliseconds, start, stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventElapsedTime failed! in scanWithCuda\n");
goto Error;
}
printf("elapsed time of scanning matrix of NA = %d, NB = %d, NMax = %d is %.4f milliseconds \n", NA,NB,NMax, milliseconds);
writeResult2File (NA, NB, NMax, milliseconds, "milliseconds", fileName);
Error:
cudaFree(dev_c);
cudaFree(dev_a);
return cudaStatus;
}
|
945bb067dec0f221b79eba8f3b0dab64d124896f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void g_countCellOcc(uint *_hash, uint *_cellOcc, uint _pixCount, uint _hashCellCount)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < _pixCount && _hash[idx] < _hashCellCount)
atomicAdd(&(_cellOcc[_hash[idx]]), 1);
} | 945bb067dec0f221b79eba8f3b0dab64d124896f.cu | #include "includes.h"
__global__ void g_countCellOcc(uint *_hash, uint *_cellOcc, uint _pixCount, uint _hashCellCount)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < _pixCount && _hash[idx] < _hashCellCount)
atomicAdd(&(_cellOcc[_hash[idx]]), 1);
} |
3761279f435ae89cc7640a75043359a59cfc534a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string.h>
#include <stdio.h>
#include <thread>
#include "apex_api.hpp"
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
hipError_t _status = apiFuncCall; \
if (_status != hipSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status));\
exit(-1); \
} \
} while (0)
struct DataElement
{
char *name;
int value;
};
__global__
void Kernel(DataElement *elem) {
printf("On device: name=%s, value=%d\n", elem->name, elem->value);
elem->name[0] = 'd';
elem->value++;
}
void launch(DataElement *elem) {
APEX_SCOPED_TIMER;
hipLaunchKernelGGL(( Kernel), dim3(1), dim3(1) , 0, 0, elem);
RUNTIME_API_CALL(hipDeviceSynchronize());
}
int main(int argc, char * argv[])
{
APEX_UNUSED(argc);
APEX_UNUSED(argv);
apex::init("apex::cuda unit test", 0, 1);
apex::apex_options::use_screen_output(true);
DataElement *e;
RUNTIME_API_CALL(hipMallocManaged((void**)&e, sizeof(DataElement)));
e->value = 10;
RUNTIME_API_CALL(hipMallocManaged((void**)&(e->name), sizeof(char) * (strlen("hello") + 1) ));
strcpy(e->name, "hello");
std::vector<std::thread*> threads;
unsigned i;
//unsigned test_numthreads = apex::hardware_concurrency() - 1;
unsigned test_numthreads = 3;
for(i = 0 ; i < test_numthreads ; i++) {
std::thread * worker = new std::thread(launch,e);
threads.push_back(worker);
}
launch(e);
for(i = 0 ; i < test_numthreads ; i++) {
threads[i]->join();
}
printf("On host: name=%s, value=%d\n", e->name, e->value);
RUNTIME_API_CALL(hipFree(e->name));
RUNTIME_API_CALL(hipFree(e));
apex::finalize();
apex::cleanup();
}
| 3761279f435ae89cc7640a75043359a59cfc534a.cu | #include <string.h>
#include <stdio.h>
#include <thread>
#include "apex_api.hpp"
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
cudaError_t _status = apiFuncCall; \
if (_status != cudaSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\
exit(-1); \
} \
} while (0)
struct DataElement
{
char *name;
int value;
};
__global__
void Kernel(DataElement *elem) {
printf("On device: name=%s, value=%d\n", elem->name, elem->value);
elem->name[0] = 'd';
elem->value++;
}
void launch(DataElement *elem) {
APEX_SCOPED_TIMER;
Kernel<<< 1, 1 >>>(elem);
RUNTIME_API_CALL(cudaDeviceSynchronize());
}
int main(int argc, char * argv[])
{
APEX_UNUSED(argc);
APEX_UNUSED(argv);
apex::init("apex::cuda unit test", 0, 1);
apex::apex_options::use_screen_output(true);
DataElement *e;
RUNTIME_API_CALL(cudaMallocManaged((void**)&e, sizeof(DataElement)));
e->value = 10;
RUNTIME_API_CALL(cudaMallocManaged((void**)&(e->name), sizeof(char) * (strlen("hello") + 1) ));
strcpy(e->name, "hello");
std::vector<std::thread*> threads;
unsigned i;
//unsigned test_numthreads = apex::hardware_concurrency() - 1;
unsigned test_numthreads = 3;
for(i = 0 ; i < test_numthreads ; i++) {
std::thread * worker = new std::thread(launch,e);
threads.push_back(worker);
}
launch(e);
for(i = 0 ; i < test_numthreads ; i++) {
threads[i]->join();
}
printf("On host: name=%s, value=%d\n", e->name, e->value);
RUNTIME_API_CALL(cudaFree(e->name));
RUNTIME_API_CALL(cudaFree(e));
apex::finalize();
apex::cleanup();
}
|
8ac6ef6ccb9740cb2790cde764b1af3e658045ac.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=64 --gridDim=64 --no-inline
#include <hip/hip_runtime.h>
inline __device__ void f() __attribute__((always_inline));
inline __device__ void f() {
}
inline __device__ void g() __attribute__((always_inline));
inline __device__ void g() {
f();
}
__global__ void k() {
g();
}
| 8ac6ef6ccb9740cb2790cde764b1af3e658045ac.cu | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include <cuda.h>
inline __device__ void f() __attribute__((always_inline));
inline __device__ void f() {
}
inline __device__ void g() __attribute__((always_inline));
inline __device__ void g() {
f();
}
__global__ void k() {
g();
}
|
6a7f04d04a6b8b8eeb3a0db2709812bd457be7cf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstring>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/utilities/error.hpp>
#include "./utilities.cuh"
#include "./utilities.hpp"
#include "char_types/char_cases.h"
#include "char_types/char_flags.h"
#include <rmm/rmm.h>
#include <rmm/rmm_api.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
#include <mutex>
namespace cudf {
namespace strings {
namespace detail {
// Used to build a temporary string_view object from a single host string.
std::unique_ptr<string_view, std::function<void(string_view*)>> string_from_host(
const char* str, hipStream_t stream)
{
if (!str) return nullptr;
auto length = std::strlen(str);
auto* d_str = new rmm::device_buffer(length, stream);
CUDA_TRY(hipMemcpyAsync(d_str->data(), str, length, hipMemcpyHostToDevice, stream));
CUDA_TRY(hipStreamSynchronize(stream));
auto deleter = [d_str](string_view* sv) { delete d_str; };
return std::unique_ptr<string_view, decltype(deleter)>{
new string_view(reinterpret_cast<char*>(d_str->data()), length), deleter};
}
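// Hedged usage sketch (illustrative only):
//   auto d_target = string_from_host("abc", stream);
//   // *d_target is a string_view backed by device memory; the custom deleter releases
//   // the underlying device buffer when d_target goes out of scope.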
// build a vector of string_view objects from a strings column
rmm::device_vector<string_view> create_string_vector_from_column(cudf::strings_column_view strings,
hipStream_t stream)
{
auto execpol = rmm::exec_policy(stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto count = strings.size();
rmm::device_vector<string_view> strings_vector(count);
string_view* d_strings = strings_vector.data().get();
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
count,
[d_column, d_strings] __device__(size_type idx) {
if (d_column.is_null(idx))
d_strings[idx] = string_view(nullptr, 0);
else
d_strings[idx] = d_column.element<string_view>(idx);
});
return strings_vector;
}
// build a strings offsets column from a vector of string_views
std::unique_ptr<cudf::column> child_offsets_from_string_vector(
const rmm::device_vector<string_view>& strings,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto transformer = [] __device__(string_view v) { return v.size_bytes(); };
auto begin = thrust::make_transform_iterator(strings.begin(), transformer);
return make_offsets_child_column(begin, begin + strings.size(), mr, stream);
}
// build a strings chars column from a vector of string_views
std::unique_ptr<cudf::column> child_chars_from_string_vector(
const rmm::device_vector<string_view>& strings,
const int32_t* d_offsets,
cudf::size_type null_count,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
size_type count = strings.size();
auto d_strings = strings.data().get();
auto execpol = rmm::exec_policy(stream);
size_type bytes = thrust::device_pointer_cast(d_offsets)[count];
// create column
auto chars_column =
make_numeric_column(data_type{INT8}, bytes, mask_state::UNALLOCATED, stream, mr);
  // get its view
auto d_chars = chars_column->mutable_view().data<int8_t>();
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
count,
[d_strings, d_offsets, d_chars] __device__(size_type idx) {
string_view const d_str = d_strings[idx];
memcpy(d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes());
});
return chars_column;
}
//
std::unique_ptr<column> create_chars_child_column(cudf::size_type strings_count,
cudf::size_type null_count,
cudf::size_type total_bytes,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_EXPECTS(null_count <= strings_count, "Invalid null count");
return make_numeric_column(data_type{INT8}, total_bytes, mask_state::UNALLOCATED, stream, mr);
}
//
std::unique_ptr<column> make_empty_strings_column(rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
return std::make_unique<column>(data_type{STRING},
0,
rmm::device_buffer{0, stream, mr}, // data
rmm::device_buffer{0, stream, mr},
0); // nulls
}
namespace {
// The device variables are created here to avoid using a singleton that may cause issues
// with RMM initialize/finalize. See PR #3159 for details on this approach.
__device__ character_flags_table_type
character_codepoint_flags[sizeof(g_character_codepoint_flags)];
__device__ character_cases_table_type character_cases_table[sizeof(g_character_cases_table)];
__device__ special_case_mapping character_special_case_mappings[sizeof(g_special_case_mappings)];
// initialization mutexes
std::mutex g_flags_table_mutex;
std::mutex g_cases_table_mutex;
std::mutex g_special_case_mappings_mutex;
character_flags_table_type* d_character_codepoint_flags = nullptr;
character_cases_table_type* d_character_cases_table = nullptr;
special_case_mapping* d_special_case_mappings = nullptr;
} // namespace
/**
* @copydoc cudf::strings::detail::get_character_flags_table
*/
const character_flags_table_type* get_character_flags_table()
{
std::lock_guard<std::mutex> guard(g_flags_table_mutex);
if (!d_character_codepoint_flags) {
CUDA_TRY(hipMemcpyToSymbol(
character_codepoint_flags, g_character_codepoint_flags, sizeof(g_character_codepoint_flags)));
CUDA_TRY(hipGetSymbolAddress((void**)&d_character_codepoint_flags, character_codepoint_flags));
}
return d_character_codepoint_flags;
}
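// Hedged usage sketch; the flag-testing helpers are assumed to come from
// char_types/char_flags.h and their exact names may differ:
//   auto const* d_flags = get_character_flags_table();   // host call, returns a device pointer
//   // inside a kernel: character_flags_table_type f = d_flags[code_point]; then test its bits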
/**
* @copydoc cudf::strings::detail::get_character_cases_table
*/
const character_cases_table_type* get_character_cases_table()
{
std::lock_guard<std::mutex> guard(g_cases_table_mutex);
if (!d_character_cases_table) {
CUDA_TRY(hipMemcpyToSymbol(
character_cases_table, g_character_cases_table, sizeof(g_character_cases_table)));
CUDA_TRY(hipGetSymbolAddress((void**)&d_character_cases_table, character_cases_table));
}
return d_character_cases_table;
}
/**
* @copydoc cudf::strings::detail::get_special_case_mapping_table
*/
const special_case_mapping* get_special_case_mapping_table()
{
std::lock_guard<std::mutex> guard(g_special_case_mappings_mutex);
if (!d_special_case_mappings) {
CUDA_TRY(hipMemcpyToSymbol(
character_special_case_mappings, g_special_case_mappings, sizeof(g_special_case_mappings)));
CUDA_TRY(
hipGetSymbolAddress((void**)&d_special_case_mappings, character_special_case_mappings));
}
return d_special_case_mappings;
}
} // namespace detail
} // namespace strings
} // namespace cudf
| 6a7f04d04a6b8b8eeb3a0db2709812bd457be7cf.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstring>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/utilities/error.hpp>
#include "./utilities.cuh"
#include "./utilities.hpp"
#include "char_types/char_cases.h"
#include "char_types/char_flags.h"
#include <rmm/rmm.h>
#include <rmm/rmm_api.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
#include <mutex>
namespace cudf {
namespace strings {
namespace detail {
// Used to build a temporary string_view object from a single host string.
std::unique_ptr<string_view, std::function<void(string_view*)>> string_from_host(
const char* str, cudaStream_t stream)
{
if (!str) return nullptr;
auto length = std::strlen(str);
auto* d_str = new rmm::device_buffer(length, stream);
CUDA_TRY(cudaMemcpyAsync(d_str->data(), str, length, cudaMemcpyHostToDevice, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
auto deleter = [d_str](string_view* sv) { delete d_str; };
return std::unique_ptr<string_view, decltype(deleter)>{
new string_view(reinterpret_cast<char*>(d_str->data()), length), deleter};
}
// build a vector of string_view objects from a strings column
rmm::device_vector<string_view> create_string_vector_from_column(cudf::strings_column_view strings,
cudaStream_t stream)
{
auto execpol = rmm::exec_policy(stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto count = strings.size();
rmm::device_vector<string_view> strings_vector(count);
string_view* d_strings = strings_vector.data().get();
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
count,
[d_column, d_strings] __device__(size_type idx) {
if (d_column.is_null(idx))
d_strings[idx] = string_view(nullptr, 0);
else
d_strings[idx] = d_column.element<string_view>(idx);
});
return strings_vector;
}
// build a strings offsets column from a vector of string_views
std::unique_ptr<cudf::column> child_offsets_from_string_vector(
const rmm::device_vector<string_view>& strings,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto transformer = [] __device__(string_view v) { return v.size_bytes(); };
auto begin = thrust::make_transform_iterator(strings.begin(), transformer);
return make_offsets_child_column(begin, begin + strings.size(), mr, stream);
}
// build a strings chars column from a vector of string_views
std::unique_ptr<cudf::column> child_chars_from_string_vector(
const rmm::device_vector<string_view>& strings,
const int32_t* d_offsets,
cudf::size_type null_count,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
size_type count = strings.size();
auto d_strings = strings.data().get();
auto execpol = rmm::exec_policy(stream);
size_type bytes = thrust::device_pointer_cast(d_offsets)[count];
// create column
auto chars_column =
make_numeric_column(data_type{INT8}, bytes, mask_state::UNALLOCATED, stream, mr);
  // get its view
auto d_chars = chars_column->mutable_view().data<int8_t>();
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
count,
[d_strings, d_offsets, d_chars] __device__(size_type idx) {
string_view const d_str = d_strings[idx];
memcpy(d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes());
});
return chars_column;
}
//
std::unique_ptr<column> create_chars_child_column(cudf::size_type strings_count,
cudf::size_type null_count,
cudf::size_type total_bytes,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_EXPECTS(null_count <= strings_count, "Invalid null count");
return make_numeric_column(data_type{INT8}, total_bytes, mask_state::UNALLOCATED, stream, mr);
}
//
std::unique_ptr<column> make_empty_strings_column(rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
return std::make_unique<column>(data_type{STRING},
0,
rmm::device_buffer{0, stream, mr}, // data
rmm::device_buffer{0, stream, mr},
0); // nulls
}
namespace {
// The device variables are created here to avoid using a singleton that may cause issues
// with RMM initialize/finalize. See PR #3159 for details on this approach.
__device__ character_flags_table_type
character_codepoint_flags[sizeof(g_character_codepoint_flags)];
__device__ character_cases_table_type character_cases_table[sizeof(g_character_cases_table)];
__device__ special_case_mapping character_special_case_mappings[sizeof(g_special_case_mappings)];
// initialization mutexes
std::mutex g_flags_table_mutex;
std::mutex g_cases_table_mutex;
std::mutex g_special_case_mappings_mutex;
character_flags_table_type* d_character_codepoint_flags = nullptr;
character_cases_table_type* d_character_cases_table = nullptr;
special_case_mapping* d_special_case_mappings = nullptr;
} // namespace
/**
* @copydoc cudf::strings::detail::get_character_flags_table
*/
const character_flags_table_type* get_character_flags_table()
{
std::lock_guard<std::mutex> guard(g_flags_table_mutex);
if (!d_character_codepoint_flags) {
CUDA_TRY(cudaMemcpyToSymbol(
character_codepoint_flags, g_character_codepoint_flags, sizeof(g_character_codepoint_flags)));
CUDA_TRY(cudaGetSymbolAddress((void**)&d_character_codepoint_flags, character_codepoint_flags));
}
return d_character_codepoint_flags;
}
/**
* @copydoc cudf::strings::detail::get_character_cases_table
*/
const character_cases_table_type* get_character_cases_table()
{
std::lock_guard<std::mutex> guard(g_cases_table_mutex);
if (!d_character_cases_table) {
CUDA_TRY(cudaMemcpyToSymbol(
character_cases_table, g_character_cases_table, sizeof(g_character_cases_table)));
CUDA_TRY(cudaGetSymbolAddress((void**)&d_character_cases_table, character_cases_table));
}
return d_character_cases_table;
}
/**
* @copydoc cudf::strings::detail::get_special_case_mapping_table
*/
const special_case_mapping* get_special_case_mapping_table()
{
std::lock_guard<std::mutex> guard(g_special_case_mappings_mutex);
if (!d_special_case_mappings) {
CUDA_TRY(cudaMemcpyToSymbol(
character_special_case_mappings, g_special_case_mappings, sizeof(g_special_case_mappings)));
CUDA_TRY(
cudaGetSymbolAddress((void**)&d_special_case_mappings, character_special_case_mappings));
}
return d_special_case_mappings;
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
f8acf8f9511486c0dde6a7627a1f41e5ecd93e6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfg-v2.cu normal z -> c, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_c
__global__
void magma_clarfg_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
float xnorm;
magmaFloatComplex dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_C_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaFloatComplex alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_C_REAL(alpha);
float alphai = MAGMA_C_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_C_MAKE(beta, 0.);
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
} else {
*dtau = MAGMA_C_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
   with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfg_gpu( magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx,
magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_scnrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_scnrm2_cols(n-1, 1, dx0+1, n, dxnorm);
hipLaunchKernelGGL(( magma_clarfg_gpu_kernel), dim3(blocks), dim3(threads),
0, magma_stream , n, dx0, dx, dtau, dxnorm, dAkk);
}
| f8acf8f9511486c0dde6a7627a1f41e5ecd93e6d.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfg-v2.cu normal z -> c, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_c
__global__
void magma_clarfg_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
float xnorm;
magmaFloatComplex dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_C_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaFloatComplex alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_C_REAL(alpha);
float alphai = MAGMA_C_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_C_MAKE(beta, 0.);
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
} else {
*dtau = MAGMA_C_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfg_gpu( magma_int_t n, magmaFloatComplex *dx0, magmaFloatComplex *dx,
magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_scnrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_scnrm2_cols(n-1, 1, dx0+1, n, dxnorm);
magma_clarfg_gpu_kernel<<< blocks, threads,
0, magma_stream >>>(n, dx0, dx, dtau, dxnorm, dAkk);
}
|
2a63059c8c9960b529ceaf774004acadfc6442ab.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i == 0)
{
C[i] = 1;
}
/*
if (i < numElements)
{
// C[((int)(&i) * i) % numElements] = i * i;
C[i * 16 % numElements] = i * i;
// C[i] = A[i] + B[i];
}
*/
}
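/* [Illustrative sketch, not part of the original sample] The element-wise add
 * described by the documentation comment above; the kernel as shipped writes
 * only C[0] and keeps the canonical body commented out. */
__global__ void
vectorAddReference(const float *A, const float *B, float *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
    {
        C[i] = A[i] + B[i];
    }
}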
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 320;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
/*
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
| 2a63059c8c9960b529ceaf774004acadfc6442ab.cu | /**
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i == 0)
{
C[i] = 1;
}
/*
if (i < numElements)
{
// C[((int)(&i) * i) % numElements] = i * i;
C[i * 16 % numElements] = i * i;
// C[i] = A[i] + B[i];
}
*/
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 320;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
/*
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
*/
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
00427ac3b310e16ceb3d502e88f50e7b0699d991.hip | // !!! This is a file automatically generated by hipify!!!
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2015/2016, March 15 - April 15
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "helper.h"
#include <cstdlib>
#include <iostream>
using std::stringstream;
using std::cerr;
using std::cout;
using std::endl;
using std::string;
// parameter processing: template specialization for T=bool
template<>
bool getParam<bool>(std::string param, bool &var, int argc, char **argv)
{
const char *c_param = param.c_str();
for(int i=argc-1; i>=1; i--)
{
if (argv[i][0]!='-') continue;
if (strcmp(argv[i]+1, c_param)==0)
{
if (!(i+1<argc) || argv[i+1][0]=='-') { var = true; return true; }
std::stringstream ss;
ss << argv[i+1];
ss >> var;
return (bool)ss;
}
}
return false;
}
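// [Usage sketch added for clarity, not in the original file] A typical call
// site, assuming the generic getParam<T> declared in helper.h:
//   bool gray = false;
//   getParam("gray", gray, argc, argv);
// "-gray" with no following value sets gray to true; "-gray 0" / "-gray 1"
// parse the value through the stringstream path above.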
// opencv helpers
void convert_layered_to_interleaved(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y=0; y<h; y++)
{
for (int x=0; x<w; x++)
{
for (int c=0; c<nc; c++)
{
aOut[(nc-1-c) + nc*(x + (size_t)w*y)] = aIn[x + (size_t)w*y + nOmega*c];
}
}
}
}
void convert_layered_to_mat(cv::Mat &mOut, const float *aIn)
{
convert_layered_to_interleaved((float*)mOut.data, aIn, mOut.cols, mOut.rows, mOut.channels());
}
void convert_interleaved_to_layered(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y=0; y<h; y++)
{
for (int x=0; x<w; x++)
{
for (int c=0; c<nc; c++)
{
aOut[x + (size_t)w*y + nOmega*c] = aIn[(nc-1-c) + nc*(x + (size_t)w*y)];
}
}
}
}
void convert_mat_to_layered(float *aOut, const cv::Mat &mIn)
{
convert_interleaved_to_layered(aOut, (float*)mIn.data, mIn.cols, mIn.rows, mIn.channels());
}
void showImage(string title, const cv::Mat &mat, int x, int y)
{
const char *wTitle = title.c_str();
cv::namedWindow(wTitle, CV_WINDOW_AUTOSIZE);
cvMoveWindow(wTitle, x, y);
cv::imshow(wTitle, mat);
}
void showHistogram256(const char *windowTitle, int *histogram, int windowX, int windowY)
{
const int nbins = 256;
cv::Mat canvas = cv::Mat::ones(125, 512, CV_8UC3);
float hmax = 0;
for(int i = 0; i < nbins; ++i)
hmax = max((int)hmax, histogram[i]);
for (int j = 0, rows = canvas.rows; j < nbins-1; j++)
{
for(int i = 0; i < 2; ++i)
cv::line(
canvas,
cv::Point(j*2+i, rows),
cv::Point(j*2+i, rows - (histogram[j] * 125.0f) / hmax),
cv::Scalar(255,128,0),
1, 8, 0
);
}
showImage(windowTitle, canvas, windowX, windowY);
}
// adding Gaussian noise
float noise(float sigma)
{
float x1 = (float)rand()/RAND_MAX;
float x2 = (float)rand()/RAND_MAX;
return sigma * sqrtf(-2*log(::max(x1,0.000001f)))*cosf(2*M_PI*x2);
}
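/* [Note added for clarity, not in the original file] noise() is the Box-Muller
 * transform: for u1, u2 ~ U(0,1), z = sqrt(-2*ln(u1)) * cos(2*pi*u2) is a
 * standard normal sample, so sigma*z ~ N(0, sigma^2). Clamping u1 at 1e-6
 * avoids taking log(0). */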
void addNoise(cv::Mat &m, float sigma)
{
float *data = (float*)m.data;
int w = m.cols;
int h = m.rows;
int nc = m.channels();
size_t n = (size_t)w*h*nc;
for(size_t i=0; i<n; i++)
{
data[i] += noise(sigma);
}
}
// cuda error checking
string prev_file = "";
int prev_line = 0;
void cuda_check(string file, int line)
{
hipError_t e = hipGetLastError();
if (e != hipSuccess)
{
cout << endl << file << ", line " << line << ": " << hipGetErrorString(e) << " (" << e << ")" << endl;
if (prev_line>0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl;
exit(1);
}
prev_file = file;
prev_line = line;
}
| 00427ac3b310e16ceb3d502e88f50e7b0699d991.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2015/2016, March 15 - April 15
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include "helper.h"
#include <cstdlib>
#include <iostream>
using std::stringstream;
using std::cerr;
using std::cout;
using std::endl;
using std::string;
// parameter processing: template specialization for T=bool
template<>
bool getParam<bool>(std::string param, bool &var, int argc, char **argv)
{
const char *c_param = param.c_str();
for(int i=argc-1; i>=1; i--)
{
if (argv[i][0]!='-') continue;
if (strcmp(argv[i]+1, c_param)==0)
{
if (!(i+1<argc) || argv[i+1][0]=='-') { var = true; return true; }
std::stringstream ss;
ss << argv[i+1];
ss >> var;
return (bool)ss;
}
}
return false;
}
// opencv helpers
void convert_layered_to_interleaved(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y=0; y<h; y++)
{
for (int x=0; x<w; x++)
{
for (int c=0; c<nc; c++)
{
aOut[(nc-1-c) + nc*(x + (size_t)w*y)] = aIn[x + (size_t)w*y + nOmega*c];
}
}
}
}
void convert_layered_to_mat(cv::Mat &mOut, const float *aIn)
{
convert_layered_to_interleaved((float*)mOut.data, aIn, mOut.cols, mOut.rows, mOut.channels());
}
void convert_interleaved_to_layered(float *aOut, const float *aIn, int w, int h, int nc)
{
if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; }
size_t nOmega = (size_t)w*h;
for (int y=0; y<h; y++)
{
for (int x=0; x<w; x++)
{
for (int c=0; c<nc; c++)
{
aOut[x + (size_t)w*y + nOmega*c] = aIn[(nc-1-c) + nc*(x + (size_t)w*y)];
}
}
}
}
void convert_mat_to_layered(float *aOut, const cv::Mat &mIn)
{
convert_interleaved_to_layered(aOut, (float*)mIn.data, mIn.cols, mIn.rows, mIn.channels());
}
void showImage(string title, const cv::Mat &mat, int x, int y)
{
const char *wTitle = title.c_str();
cv::namedWindow(wTitle, CV_WINDOW_AUTOSIZE);
cvMoveWindow(wTitle, x, y);
cv::imshow(wTitle, mat);
}
void showHistogram256(const char *windowTitle, int *histogram, int windowX, int windowY)
{
const int nbins = 256;
cv::Mat canvas = cv::Mat::ones(125, 512, CV_8UC3);
float hmax = 0;
for(int i = 0; i < nbins; ++i)
hmax = max((int)hmax, histogram[i]);
for (int j = 0, rows = canvas.rows; j < nbins-1; j++)
{
for(int i = 0; i < 2; ++i)
cv::line(
canvas,
cv::Point(j*2+i, rows),
cv::Point(j*2+i, rows - (histogram[j] * 125.0f) / hmax),
cv::Scalar(255,128,0),
1, 8, 0
);
}
showImage(windowTitle, canvas, windowX, windowY);
}
// adding Gaussian noise
float noise(float sigma)
{
float x1 = (float)rand()/RAND_MAX;
float x2 = (float)rand()/RAND_MAX;
return sigma * sqrtf(-2*log(std::max(x1,0.000001f)))*cosf(2*M_PI*x2);
}
void addNoise(cv::Mat &m, float sigma)
{
float *data = (float*)m.data;
int w = m.cols;
int h = m.rows;
int nc = m.channels();
size_t n = (size_t)w*h*nc;
for(size_t i=0; i<n; i++)
{
data[i] += noise(sigma);
}
}
// cuda error checking
string prev_file = "";
int prev_line = 0;
void cuda_check(string file, int line)
{
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess)
{
cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
if (prev_line>0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl;
exit(1);
}
prev_file = file;
prev_line = line;
}
|
4128e68ef88fbe3998cb9fd15b2cf01b0696991c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/split.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void forward_split_kernel(const int num, const int num_outputs_,
const int outer_size_,
const int inner_size_, const int i0,
const T *x, T *y) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i1 = idx / inner_size_;
const int i2 = idx % inner_size_;
y[i1 * inner_size_ + i2] =
x[i1 * (inner_size_ * num_outputs_) + i0 * inner_size_ + i2];
}
}
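/* [Illustrative sketch, not part of NNabla] Host-side reference of the same
 * indexing, assuming x is a contiguous (outer_size, num_outputs, inner_size)
 * buffer and y receives slice i0 along the split axis. */
template <typename T>
void split_reference(const T *x, T *y, int outer_size, int inner_size,
                     int num_outputs, int i0) {
  for (int i1 = 0; i1 < outer_size; ++i1)
    for (int i2 = 0; i2 < inner_size; ++i2)
      y[i1 * inner_size + i2] =
          x[i1 * (inner_size * num_outputs) + i0 * inner_size + i2];
}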
template <typename T>
void SplitCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
for (int i0 = 0; i0 < this->num_outputs_; ++i0) {
T *y = outputs[i0]->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
forward_split_kernel, this->inner_size_ * this->outer_size_,
this->num_outputs_, this->outer_size_, this->inner_size_, i0, x, y);
}
}
template <typename T, bool accum>
__global__ void backward_split_kernel(const int num, const int num_outputs_,
const int outer_size_,
const int inner_size_, const int i0,
T *dx, const T *dy) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i1 = idx / inner_size_;
const int i2 = idx % inner_size_;
T &ref = dx[i1 * (inner_size_ * num_outputs_) + i0 * inner_size_ + i2];
ref = (accum ? ref : 0) + dy[i1 * inner_size_ + i2];
}
}
template <typename T>
void SplitCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
for (int i0 = 0; i0 < this->num_outputs_; ++i0) {
const T *dy = outputs[i0]->get_grad_pointer<T>(this->ctx_);
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((backward_split_kernel<T, true>),
this->inner_size_ * this->outer_size_,
this->num_outputs_, this->outer_size_,
this->inner_size_, i0, dx, dy);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((backward_split_kernel<T, false>),
this->inner_size_ * this->outer_size_,
this->num_outputs_, this->outer_size_,
this->inner_size_, i0, dx, dy);
}
}
}
// template instantiation
template class SplitCuda<float>;
}
| 4128e68ef88fbe3998cb9fd15b2cf01b0696991c.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/split.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void forward_split_kernel(const int num, const int num_outputs_,
const int outer_size_,
const int inner_size_, const int i0,
const T *x, T *y) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i1 = idx / inner_size_;
const int i2 = idx % inner_size_;
y[i1 * inner_size_ + i2] =
x[i1 * (inner_size_ * num_outputs_) + i0 * inner_size_ + i2];
}
}
template <typename T>
void SplitCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
for (int i0 = 0; i0 < this->num_outputs_; ++i0) {
T *y = outputs[i0]->cast_data_and_get_pointer<T>(this->ctx_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
forward_split_kernel, this->inner_size_ * this->outer_size_,
this->num_outputs_, this->outer_size_, this->inner_size_, i0, x, y);
}
}
template <typename T, bool accum>
__global__ void backward_split_kernel(const int num, const int num_outputs_,
const int outer_size_,
const int inner_size_, const int i0,
T *dx, const T *dy) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i1 = idx / inner_size_;
const int i2 = idx % inner_size_;
T &ref = dx[i1 * (inner_size_ * num_outputs_) + i0 * inner_size_ + i2];
ref = (accum ? ref : 0) + dy[i1 * inner_size_ + i2];
}
}
template <typename T>
void SplitCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
for (int i0 = 0; i0 < this->num_outputs_; ++i0) {
const T *dy = outputs[i0]->get_grad_pointer<T>(this->ctx_);
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((backward_split_kernel<T, true>),
this->inner_size_ * this->outer_size_,
this->num_outputs_, this->outer_size_,
this->inner_size_, i0, dx, dy);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((backward_split_kernel<T, false>),
this->inner_size_ * this->outer_size_,
this->num_outputs_, this->outer_size_,
this->inner_size_, i0, dx, dy);
}
}
}
// template instantiation
template class SplitCuda<float>;
}
|
d705ba500d199386fbb8234ae6418c4c3c0eb004.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template<typename scalar_t>
struct AddFunctor {
AddFunctor(scalar_t a): alpha(a) {}
__device__ __forceinline__ scalar_t operator() (const scalar_t a, const scalar_t b) const {
return a + alpha * b;
}
private:
scalar_t alpha;
};
void add_kernel_cuda(TensorIteratorBase& iter, const Scalar& alpha_scalar) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, iter.common_dtype(), "add_cuda/sub_cuda", [&]() {
AddFunctor<scalar_t> f(alpha_scalar.to<scalar_t>());
gpu_kernel_with_scalars(iter, f);
});
}
static void sub_kernel_cuda(TensorIterator& iter, const Scalar& alpha_scalar) {
add_kernel_cuda(iter, -alpha_scalar);
}
REGISTER_DISPATCH(add_stub, &add_kernel_cuda);
REGISTER_DISPATCH(sub_stub, &sub_kernel_cuda);
}} // namespace at::native
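// [Illustrative sketch, not part of ATen] The same alpha-blended semantics on
// plain contiguous buffers; sub_kernel_cuda above is literally add with -alpha:
//   out[i] = a[i] + alpha * b[i]
static inline void add_reference(const float *a, const float *b, float *out,
                                 long long n, float alpha) {
  for (long long i = 0; i < n; ++i) out[i] = a[i] + alpha * b[i];
}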
| d705ba500d199386fbb8234ae6418c4c3c0eb004.cu | #include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template<typename scalar_t>
struct AddFunctor {
AddFunctor(scalar_t a): alpha(a) {}
__device__ __forceinline__ scalar_t operator() (const scalar_t a, const scalar_t b) const {
return a + alpha * b;
}
private:
scalar_t alpha;
};
void add_kernel_cuda(TensorIteratorBase& iter, const Scalar& alpha_scalar) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, iter.common_dtype(), "add_cuda/sub_cuda", [&]() {
AddFunctor<scalar_t> f(alpha_scalar.to<scalar_t>());
gpu_kernel_with_scalars(iter, f);
});
}
static void sub_kernel_cuda(TensorIterator& iter, const Scalar& alpha_scalar) {
add_kernel_cuda(iter, -alpha_scalar);
}
REGISTER_DISPATCH(add_stub, &add_kernel_cuda);
REGISTER_DISPATCH(sub_stub, &sub_kernel_cuda);
}} // namespace at::native
|
029b7f09f8ba766e4517ab69358a8700728206f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2008-2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "atom.h"
#include "cutoff.h"
#include "parboil.h"
#ifdef __DEVICE_EMULATION__
#define DEBUG
/* define which grid block and which thread to examine */
#define BX 0
#define BY 0
#define TX 0
#define TY 0
#define TZ 0
#define EMU(code) do { \
if (blockIdx.x==BX && blockIdx.y==BY && \
threadIdx.x==TX && threadIdx.y==TY && threadIdx.z==TZ) { \
code; \
} \
} while (0)
#define INT(n) printf("%s = %d\n", #n, n)
#define FLOAT(f) printf("%s = %g\n", #f, (double)(f))
#define INT3(n) printf("%s = %d %d %d\n", #n, (n).x, (n).y, (n).z)
#define FLOAT4(f) printf("%s = %g %g %g %g\n", #f, (double)(f).x, \
(double)(f).y, (double)(f).z, (double)(f).w)
#else
#define EMU(code)
#define INT(n)
#define FLOAT(f)
#define INT3(n)
#define FLOAT4(f)
#endif
/* report error from CUDA */
#define CUERR \
do { \
hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \
return -1; \
} \
} while (0)
/*
* neighbor list:
* stored in constant memory as table of offsets
* flat index addressing is computed by kernel
*
* reserve enough memory for 11^3 stencil of grid cells
* this fits within 16K of memory
*/
#define NBRLIST_DIM 11
#define NBRLIST_MAXLEN (NBRLIST_DIM * NBRLIST_DIM * NBRLIST_DIM)
__constant__ int NbrListLen;
__constant__ int3 NbrList[NBRLIST_MAXLEN];
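/* [Worked numbers added for clarity, not in the original file]
 * NBRLIST_MAXLEN = 11^3 = 1331 offsets; at sizeof(int3) = 12 bytes that is
 * 1331 * 12 = 15972 bytes, just under the 16K quoted above. A stencil of
 * half-width c requires 2c+1 <= 11, i.e. c <= 5, which with BIN_LENGTH = 4 A
 * bounds the cutoff at 5 * 4 = 20 A for the single-cell stencil (the host
 * code checks exactly this before building the neighbor list). */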
/* Normally, we're summing electrostatic potential. However, for
* profiling we may want to appropriate this storage to count the
* number of nearby atoms, instead.
*/
#undef NEIGHBOR_COUNT
#ifndef NEIGHBOR_COUNT
typedef float ener_t;
#else
typedef int ener_t;
#endif
/*
* atom bins cached into shared memory for processing
*
* this reserves 4K of shared memory for 32 atom bins each containing 8 atoms,
* should permit scheduling of up to 3 thread blocks per SM
*/
#define BIN_DEPTH 8 /* max number of atoms per bin */
#define BIN_SIZE 32 /* size of bin in floats */
#define BIN_SHIFT 5 /* # of bits to shift for mul/div by BIN_SIZE */
#define BIN_CACHE_MAXLEN 32 /* max number of atom bins to cache */
#define BIN_LENGTH 4.f /* spatial length in Angstroms */
#define BIN_INVLEN (1.f / BIN_LENGTH)
/* assuming density of 1 atom / 10 A^3, expectation is 6.4 atoms per bin
* so that bin fill should be 80% (for non-empty regions of space) */
#define REGION_SIZE 512 /* number of floats in lattice region */
/*
* potential lattice is decomposed into size 8^3 lattice point "regions"
*
* THIS IMPLEMENTATION: one thread per lattice point
* thread block size 128 gives 4 thread blocks per region
* kernel is invoked for each x-y plane of regions,
* where gridDim.x is 4*(x region dimension) so that blockIdx.x
* can absorb the z sub-region index in its 2 lowest order bits
*
* Regions are stored contiguously in memory in row-major order
*
* The bins have to not only cover the region, but they need to surround
* the outer edges so that region sides and corners can still use
* neighbor list stencil. The binZeroAddr is actually a shifted pointer into
* the bin array (binZeroAddr = binBaseAddr + (c*binDim_y + c)*binDim_x + c)
* where c = ceil(cutoff / binsize). This allows for negative offsets to
* be added to myBinIndex.
*
* The (0,0,0) spatial origin corresponds to lower left corner of both
* regionZeroAddr and binZeroAddr. The atom coordinates are translated
* during binning to enforce this assumption.
*/
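/* [Illustrative sketch, not in the original source] The flat bin addressing
 * the kernel below uses, written out in float4 units. The indices i, j, k are
 * relative to the shifted zero-origin pointer and may go as low as -c; the
 * (c*binDim.y + c)*binDim.x + c offset folded into binZeroAddr is what keeps
 * those negative offsets inside the allocation. */
static __host__ __device__ inline float4 *bin_addr(float4 *binZeroAddr,
                                                    int binDim_x, int binDim_y,
                                                    int i, int j, int k)
{
    return binZeroAddr + ((k * binDim_y + j) * binDim_x + i) * BIN_DEPTH;
}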
__global__ static void cuda_cutoff_potential_lattice6overlap(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
ener_t *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ ener_t *myRegionAddr;
__shared__ int3 myBinIndex;
const int xRegionIndex = blockIdx.x;
const int yRegionIndex = blockIdx.y;
/* thread id */
const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x
+ threadIdx.x;
/* blockDim.x == 8, blockDim.y == 2, blockDim.z == 8 */
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y
+ yRegionIndex)*gridDim.x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + threadIdx.x) * h;
float y = (8 * yRegionIndex + threadIdx.y) * h;
float z = (8 * zRegionIndex + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
#ifndef NEIGHBOR_COUNT
ener_t energy0 = 0.f;
ener_t energy1 = 0.f;
ener_t energy2 = 0.f;
ener_t energy3 = 0.f;
#else
ener_t energy0 = 0, energy1 = 0, energy2 = 0, energy3 = 0;
#endif
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr)
+ (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
int i;
for (i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
#else
energy0 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
#else
energy1 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
#else
energy2 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
#else
energy3 += (r2 < cutoff2);
#endif
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
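/* [Worked equation added for clarity, not in the original file] Each branch
 * above accumulates the switched Coulomb term
 *     U(r) = (q / r) * (1 - r^2 / rc^2)^2   for r < rc, and 0 otherwise,
 * evaluated as aq * rsqrtf(r2) * s * s with s = 1 - r2 * inv_cutoff2. The four
 * accumulators energy0..energy3 belong to the lattice points at y, y+2h,
 * y+4h and y+6h handled by the same thread. */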
extern "C" int gpu_compute_cutoff_potential_lattice6overlap(
struct pb_TimerSet *timers, /* for measuring execution time */
Lattice *lattice,
float cutoff, /* cutoff distance */
Atoms *atoms, /* array of atoms */
int verbose /* print info/debug messages */
)
{
int nx = lattice->dim.nx;
int ny = lattice->dim.ny;
int nz = lattice->dim.nz;
float xlo = lattice->dim.lo.x;
float ylo = lattice->dim.lo.y;
float zlo = lattice->dim.lo.z;
float h = lattice->dim.h;
int natoms = atoms->size;
Atom *atom = atoms->atoms;
int3 nbrlist[NBRLIST_MAXLEN];
int nbrlistlen = 0;
int binHistoFull[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int binHistoCover[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int num_excluded = 0;
int xRegionDim, yRegionDim, zRegionDim;
int xRegionIndex, yRegionIndex, zRegionIndex;
int xOffset, yOffset, zOffset;
int lnx, lny, lnz, lnall;
ener_t *regionZeroAddr, *thisRegion;
ener_t *regionZeroCuda;
int index, indexRegion;
int c;
int3 binDim;
int nbins;
float4 *binBaseAddr, *binZeroAddr;
float4 *binBaseCuda, *binZeroCuda;
int *bincntBaseAddr, *bincntZeroAddr;
Atoms *extra = NULL;
int i, j, k, n;
int sum, total;
float avgFillFull, avgFillCover;
const float cutoff2 = cutoff * cutoff;
const float inv_cutoff2 = 1.f / cutoff2;
dim3 gridDim, blockDim;
#ifdef NEIGHBOR_COUNT
double neighbor_count = 0; /* used to profile the number of atoms near a
* lattice point */
#endif
// Caller has made the 'compute' timer active
/* pad lattice to be factor of 8 in each dimension */
xRegionDim = (int) ceilf(nx/8.f);
yRegionDim = (int) ceilf(ny/8.f);
zRegionDim = (int) ceilf(nz/8.f);
lnx = 8 * xRegionDim;
lny = 8 * yRegionDim;
lnz = 8 * zRegionDim;
lnall = lnx * lny * lnz;
/* will receive energies from CUDA */
regionZeroAddr = (ener_t *) malloc(lnall * sizeof(float));
/* create bins */
c = (int) ceil(cutoff * BIN_INVLEN); /* count extra bins around lattice */
binDim.x = (int) ceil(lnx * h * BIN_INVLEN) + 2*c;
binDim.y = (int) ceil(lny * h * BIN_INVLEN) + 2*c;
binDim.z = (int) ceil(lnz * h * BIN_INVLEN) + 2*c;
nbins = binDim.x * binDim.y * binDim.z;
binBaseAddr = (float4 *) calloc(nbins * BIN_DEPTH, sizeof(float4));
binZeroAddr = binBaseAddr + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
bincntBaseAddr = (int *) calloc(nbins, sizeof(int));
bincntZeroAddr = bincntBaseAddr + (c * binDim.y + c) * binDim.x + c;
/* create neighbor list */
if (ceilf(BIN_LENGTH / (8*h)) == floorf(BIN_LENGTH / (8*h))) {
float s = sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 1 cell */
if (2*c + 1 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-1)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else if (8*h <= 2*BIN_LENGTH) {
float s = 2.f*sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 3-cube of cells */
if (2*c + 3 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-3)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else {
fprintf(stderr, "must have h <= %f\n", 0.25 * BIN_LENGTH);
return -1;
}
/* perform geometric hashing of atoms into bins */
{
/* array of extra atoms, permit average of one extra per bin */
Atom *extra_atoms = (Atom *) calloc(nbins, sizeof(Atom));
int extra_len = 0;
for (n = 0; n < natoms; n++) {
float4 p;
p.x = atom[n].x - xlo;
p.y = atom[n].y - ylo;
p.z = atom[n].z - zlo;
p.w = atom[n].q;
i = (int) floorf(p.x * BIN_INVLEN);
j = (int) floorf(p.y * BIN_INVLEN);
k = (int) floorf(p.z * BIN_INVLEN);
if (i >= -c && i < binDim.x - c &&
j >= -c && j < binDim.y - c &&
k >= -c && k < binDim.z - c &&
atom[n].q != 0) {
int index = (k * binDim.y + j) * binDim.x + i;
float4 *bin = binZeroAddr + index * BIN_DEPTH;
int bindex = bincntZeroAddr[index];
if (bindex < BIN_DEPTH) {
/* copy atom into bin and increase counter for this bin */
bin[bindex] = p;
bincntZeroAddr[index]++;
}
else {
/* add index to array of extra atoms to be computed with CPU */
if (extra_len >= nbins) {
fprintf(stderr, "exceeded space for storing extra atoms\n");
return -1;
}
extra_atoms[extra_len] = atom[n];
extra_len++;
}
}
else {
/* excluded atoms are either outside bins or neutrally charged */
num_excluded++;
}
}
/* Save result */
extra = (Atoms *)malloc(sizeof(Atoms));
extra->atoms = extra_atoms;
extra->size = extra_len;
}
/* bin stats */
sum = total = 0;
for (n = 0; n < nbins; n++) {
binHistoFull[ bincntBaseAddr[n] ]++;
sum += bincntBaseAddr[n];
total += BIN_DEPTH;
}
avgFillFull = sum / (float) total;
sum = total = 0;
for (k = 0; k < binDim.z - 2*c; k++) {
for (j = 0; j < binDim.y - 2*c; j++) {
for (i = 0; i < binDim.x - 2*c; i++) {
int index = (k * binDim.y + j) * binDim.x + i;
binHistoCover[ bincntZeroAddr[index] ]++;
sum += bincntZeroAddr[index];
total += BIN_DEPTH;
}
}
}
avgFillCover = sum / (float) total;
if (verbose) {
/* report */
printf("number of atoms = %d\n", natoms);
printf("lattice spacing = %g\n", h);
printf("cutoff distance = %g\n", cutoff);
printf("\n");
printf("requested lattice dimensions = %d %d %d\n", nx, ny, nz);
printf("requested space dimensions = %g %g %g\n", nx*h, ny*h, nz*h);
printf("expanded lattice dimensions = %d %d %d\n", lnx, lny, lnz);
printf("expanded space dimensions = %g %g %g\n", lnx*h, lny*h, lnz*h);
printf("number of bytes for lattice data = %u\n", lnall*sizeof(float));
printf("\n");
printf("bin padding thickness = %d\n", c);
printf("bin cover dimensions = %d %d %d\n",
binDim.x - 2*c, binDim.y - 2*c, binDim.z - 2*c);
printf("bin full dimensions = %d %d %d\n", binDim.x, binDim.y, binDim.z);
printf("number of bins = %d\n", nbins);
printf("total number of atom slots = %d\n", nbins * BIN_DEPTH);
printf("%% overhead space = %g\n",
(natoms / (double) (nbins * BIN_DEPTH)) * 100);
printf("number of bytes for bin data = %u\n",
nbins * BIN_DEPTH * sizeof(float4));
printf("\n");
printf("bin histogram with padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoFull[n]);
sum += binHistoFull[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillFull * 100);
printf("\n");
printf("bin histogram excluding padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoCover[n]);
sum += binHistoCover[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillCover * 100);
printf("\n");
printf("number of extra atoms = %d\n", extra->size);
printf("%% atoms that are extra = %g\n", (extra->size / (double) natoms) * 100);
printf("\n");
/* sanity check on bins */
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoFull[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram with edges: "
"sum + others = %d\n", sum);
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoCover[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram excluding edges: "
"sum + others = %d\n", sum);
printf("\n");
/* neighbor list */
printf("neighbor list length = %d\n", nbrlistlen);
printf("\n");
}
/* setup CUDA kernel parameters */
gridDim.x = xRegionDim;
gridDim.y = yRegionDim;
gridDim.z = 1;
blockDim.x = 8;
blockDim.y = 2;
blockDim.z = 8;
/* allocate and initialize memory on CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
if (verbose) {
printf("Allocating %.2fMB on CUDA device for potentials\n",
lnall * sizeof(float) / (double) (1024*1024));
}
hipMalloc((void **) ®ionZeroCuda, lnall * sizeof(ener_t));
CUERR;
hipMemset(regionZeroCuda, 0, lnall * sizeof(ener_t));
CUERR;
if (verbose) {
printf("Allocating %.2fMB on CUDA device for atom bins\n",
nbins * BIN_DEPTH * sizeof(float4) / (double) (1024*1024));
}
hipMalloc((void **) &binBaseCuda, nbins * BIN_DEPTH * sizeof(float4));
CUERR;
hipMemcpy(binBaseCuda, binBaseAddr, nbins * BIN_DEPTH * sizeof(float4),
hipMemcpyHostToDevice);
CUERR;
binZeroCuda = binBaseCuda + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
hipMemcpyToSymbol(NbrListLen, &nbrlistlen, sizeof(int), 0);
CUERR;
hipMemcpyToSymbol(NbrList, nbrlist, nbrlistlen * sizeof(int3), 0);
CUERR;
if (verbose)
printf("\n");
hipStream_t cutoffstream;
hipStreamCreate(&cutoffstream);
/* loop over z-dimension, invoke CUDA kernel for each x-y plane */
pb_SwitchToTimer(timers, pb_TimerID_KERNEL);
printf("Invoking CUDA kernel on %d region planes...\n", zRegionDim);
for (zRegionIndex = 0; zRegionIndex < zRegionDim; zRegionIndex++) {
printf(" computing plane %d\r", zRegionIndex);
fflush(stdout);
hipLaunchKernelGGL(( cuda_cutoff_potential_lattice6overlap), dim3(gridDim), dim3(blockDim), 0, 0, binDim.x, binDim.y,
binZeroCuda, h, cutoff2, inv_cutoff2, regionZeroCuda, zRegionIndex);
}
/*
* handle extra atoms on the CPU, concurrently with the GPU calculations
*/
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
if (extra->size > 0) {
printf("computing extra atoms on CPU\n");
if (cpu_compute_cutoff_potential_lattice(lattice, cutoff, extra)) {
fprintf(stderr, "cpu_compute_cutoff_potential_lattice() failed "
"for extra atoms\n");
return -1;
}
printf("\n");
}
hipStreamSynchronize(cutoffstream);
CUERR;
hipDeviceSynchronize();
hipStreamDestroy(cutoffstream);
printf("Finished CUDA kernel calls \n");
/* copy result regions from CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
hipMemcpy(regionZeroAddr, regionZeroCuda, lnall * sizeof(ener_t),
hipMemcpyDeviceToHost);
CUERR;
/* free CUDA memory allocations */
hipFree(regionZeroCuda);
hipFree(binBaseCuda);
/*
* transpose on CPU, updating, producing the final lattice
*/
/* transpose regions back into lattice */
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
for (k = 0; k < nz; k++) {
zRegionIndex = (k >> 3);
zOffset = (k & 7);
for (j = 0; j < ny; j++) {
yRegionIndex = (j >> 3);
yOffset = (j & 7);
for (i = 0; i < nx; i++) {
xRegionIndex = (i >> 3);
xOffset = (i & 7);
thisRegion = regionZeroAddr
+ ((zRegionIndex * yRegionDim + yRegionIndex) * xRegionDim
+ xRegionIndex) * REGION_SIZE;
indexRegion = (zOffset * 8 + yOffset) * 8 + xOffset;
index = (k * ny + j) * nx + i;
#ifndef NEIGHBOR_COUNT
lattice->lattice[index] += thisRegion[indexRegion];
#else
neighbor_count += thisRegion[indexRegion];
#endif
}
}
}
#ifdef NEIGHBOR_COUNT
printf("Neighbor count: %f\n", (float)neighbor_count);
#endif
/* cleanup memory allocations */
free(regionZeroAddr);
free(binBaseAddr);
free(bincntBaseAddr);
free_atom(extra);
return 0;
}
| 029b7f09f8ba766e4517ab69358a8700728206f5.cu | /***************************************************************************
*cr
*cr (C) Copyright 2008-2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "atom.h"
#include "cutoff.h"
#include "parboil.h"
#ifdef __DEVICE_EMULATION__
#define DEBUG
/* define which grid block and which thread to examine */
#define BX 0
#define BY 0
#define TX 0
#define TY 0
#define TZ 0
#define EMU(code) do { \
if (blockIdx.x==BX && blockIdx.y==BY && \
threadIdx.x==TX && threadIdx.y==TY && threadIdx.z==TZ) { \
code; \
} \
} while (0)
#define INT(n) printf("%s = %d\n", #n, n)
#define FLOAT(f) printf("%s = %g\n", #f, (double)(f))
#define INT3(n) printf("%s = %d %d %d\n", #n, (n).x, (n).y, (n).z)
#define FLOAT4(f) printf("%s = %g %g %g %g\n", #f, (double)(f).x, \
(double)(f).y, (double)(f).z, (double)(f).w)
#else
#define EMU(code)
#define INT(n)
#define FLOAT(f)
#define INT3(n)
#define FLOAT4(f)
#endif
/* report error from CUDA */
#define CUERR \
do { \
cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
return -1; \
} \
} while (0)
/*
* neighbor list:
* stored in constant memory as table of offsets
* flat index addressing is computed by kernel
*
* reserve enough memory for 11^3 stencil of grid cells
* this fits within 16K of memory
*/
#define NBRLIST_DIM 11
#define NBRLIST_MAXLEN (NBRLIST_DIM * NBRLIST_DIM * NBRLIST_DIM)
__constant__ int NbrListLen;
__constant__ int3 NbrList[NBRLIST_MAXLEN];
/* Normally, we're summing electrostatic potential. However, for
* profiling we may want to appropriate this storage to count the
* number of nearby atoms, instead.
*/
#undef NEIGHBOR_COUNT
#ifndef NEIGHBOR_COUNT
typedef float ener_t;
#else
typedef int ener_t;
#endif
/*
* atom bins cached into shared memory for processing
*
* this reserves 4K of shared memory for 32 atom bins each containing 8 atoms,
* should permit scheduling of up to 3 thread blocks per SM
*/
#define BIN_DEPTH 8 /* max number of atoms per bin */
#define BIN_SIZE 32 /* size of bin in floats */
#define BIN_SHIFT 5 /* # of bits to shift for mul/div by BIN_SIZE */
#define BIN_CACHE_MAXLEN 32 /* max number of atom bins to cache */
#define BIN_LENGTH 4.f /* spatial length in Angstroms */
#define BIN_INVLEN (1.f / BIN_LENGTH)
/* assuming density of 1 atom / 10 A^3, expectation is 6.4 atoms per bin
* so that bin fill should be 80% (for non-empty regions of space) */
#define REGION_SIZE 512 /* number of floats in lattice region */
/*
* potential lattice is decomposed into size 8^3 lattice point "regions"
*
* THIS IMPLEMENTATION: one thread per lattice point
* thread block size 128 gives 4 thread blocks per region
* kernel is invoked for each x-y plane of regions,
* where gridDim.x is 4*(x region dimension) so that blockIdx.x
* can absorb the z sub-region index in its 2 lowest order bits
*
* Regions are stored contiguously in memory in row-major order
*
* The bins have to not only cover the region, but they need to surround
* the outer edges so that region sides and corners can still use
* neighbor list stencil. The binZeroAddr is actually a shifted pointer into
* the bin array (binZeroAddr = binBaseAddr + (c*binDim_y + c)*binDim_x + c)
* where c = ceil(cutoff / binsize). This allows for negative offsets to
* be added to myBinIndex.
*
* The (0,0,0) spatial origin corresponds to lower left corner of both
* regionZeroAddr and binZeroAddr. The atom coordinates are translated
* during binning to enforce this assumption.
*/
__global__ static void cuda_cutoff_potential_lattice6overlap(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
ener_t *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ ener_t *myRegionAddr;
__shared__ int3 myBinIndex;
const int xRegionIndex = blockIdx.x;
const int yRegionIndex = blockIdx.y;
/* thread id */
const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x
+ threadIdx.x;
/* blockDim.x == 8, blockDim.y == 2, blockDim.z == 8 */
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y
+ yRegionIndex)*gridDim.x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + threadIdx.x) * h;
float y = (8 * yRegionIndex + threadIdx.y) * h;
float z = (8 * zRegionIndex + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
#ifndef NEIGHBOR_COUNT
ener_t energy0 = 0.f;
ener_t energy1 = 0.f;
ener_t energy2 = 0.f;
ener_t energy3 = 0.f;
#else
ener_t energy0 = 0, energy1 = 0, energy2 = 0, energy3 = 0;
#endif
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr)
+ (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
int i;
for (i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
#else
energy0 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
#else
energy1 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
#else
energy2 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
#else
energy3 += (r2 < cutoff2);
#endif
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
extern "C" int gpu_compute_cutoff_potential_lattice6overlap(
struct pb_TimerSet *timers, /* for measuring execution time */
Lattice *lattice,
float cutoff, /* cutoff distance */
Atoms *atoms, /* array of atoms */
int verbose /* print info/debug messages */
)
{
int nx = lattice->dim.nx;
int ny = lattice->dim.ny;
int nz = lattice->dim.nz;
float xlo = lattice->dim.lo.x;
float ylo = lattice->dim.lo.y;
float zlo = lattice->dim.lo.z;
float h = lattice->dim.h;
int natoms = atoms->size;
Atom *atom = atoms->atoms;
int3 nbrlist[NBRLIST_MAXLEN];
int nbrlistlen = 0;
int binHistoFull[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int binHistoCover[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int num_excluded = 0;
int xRegionDim, yRegionDim, zRegionDim;
int xRegionIndex, yRegionIndex, zRegionIndex;
int xOffset, yOffset, zOffset;
int lnx, lny, lnz, lnall;
ener_t *regionZeroAddr, *thisRegion;
ener_t *regionZeroCuda;
int index, indexRegion;
int c;
int3 binDim;
int nbins;
float4 *binBaseAddr, *binZeroAddr;
float4 *binBaseCuda, *binZeroCuda;
int *bincntBaseAddr, *bincntZeroAddr;
Atoms *extra = NULL;
int i, j, k, n;
int sum, total;
float avgFillFull, avgFillCover;
const float cutoff2 = cutoff * cutoff;
const float inv_cutoff2 = 1.f / cutoff2;
dim3 gridDim, blockDim;
#ifdef NEIGHBOR_COUNT
double neighbor_count = 0; /* used to profile the number of atoms near a
* lattice point */
#endif
// Caller has made the 'compute' timer active
/* pad lattice to be factor of 8 in each dimension */
xRegionDim = (int) ceilf(nx/8.f);
yRegionDim = (int) ceilf(ny/8.f);
zRegionDim = (int) ceilf(nz/8.f);
lnx = 8 * xRegionDim;
lny = 8 * yRegionDim;
lnz = 8 * zRegionDim;
lnall = lnx * lny * lnz;
/* will receive energies from CUDA */
  regionZeroAddr = (ener_t *) malloc(lnall * sizeof(ener_t));
/* create bins */
c = (int) ceil(cutoff * BIN_INVLEN); /* count extra bins around lattice */
binDim.x = (int) ceil(lnx * h * BIN_INVLEN) + 2*c;
binDim.y = (int) ceil(lny * h * BIN_INVLEN) + 2*c;
binDim.z = (int) ceil(lnz * h * BIN_INVLEN) + 2*c;
nbins = binDim.x * binDim.y * binDim.z;
binBaseAddr = (float4 *) calloc(nbins * BIN_DEPTH, sizeof(float4));
binZeroAddr = binBaseAddr + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
bincntBaseAddr = (int *) calloc(nbins, sizeof(int));
bincntZeroAddr = bincntBaseAddr + (c * binDim.y + c) * binDim.x + c;
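  /* binZeroAddr and bincntZeroAddr point at bin (0,0,0) of the interior (unpadded)
   * region, i.e. they skip the c layers of padding bins added on each side, so the
   * hashing and neighbor-list code below can index bins in [-c, binDim - c) directly. */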
/* create neighbor list */
if (ceilf(BIN_LENGTH / (8*h)) == floorf(BIN_LENGTH / (8*h))) {
float s = sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 1 cell */
if (2*c + 1 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-1)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else if (8*h <= 2*BIN_LENGTH) {
float s = 2.f*sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 3-cube of cells */
if (2*c + 3 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-3)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else {
fprintf(stderr, "must have h <= %f\n", 0.25 * BIN_LENGTH);
return -1;
}
/* perform geometric hashing of atoms into bins */
{
/* array of extra atoms, permit average of one extra per bin */
Atom *extra_atoms = (Atom *) calloc(nbins, sizeof(Atom));
int extra_len = 0;
for (n = 0; n < natoms; n++) {
float4 p;
p.x = atom[n].x - xlo;
p.y = atom[n].y - ylo;
p.z = atom[n].z - zlo;
p.w = atom[n].q;
i = (int) floorf(p.x * BIN_INVLEN);
j = (int) floorf(p.y * BIN_INVLEN);
k = (int) floorf(p.z * BIN_INVLEN);
if (i >= -c && i < binDim.x - c &&
j >= -c && j < binDim.y - c &&
k >= -c && k < binDim.z - c &&
atom[n].q != 0) {
int index = (k * binDim.y + j) * binDim.x + i;
float4 *bin = binZeroAddr + index * BIN_DEPTH;
int bindex = bincntZeroAddr[index];
if (bindex < BIN_DEPTH) {
/* copy atom into bin and increase counter for this bin */
bin[bindex] = p;
bincntZeroAddr[index]++;
}
else {
/* add index to array of extra atoms to be computed with CPU */
if (extra_len >= nbins) {
fprintf(stderr, "exceeded space for storing extra atoms\n");
return -1;
}
extra_atoms[extra_len] = atom[n];
extra_len++;
}
}
else {
/* excluded atoms are either outside bins or neutrally charged */
num_excluded++;
}
}
/* Save result */
extra = (Atoms *)malloc(sizeof(Atoms));
extra->atoms = extra_atoms;
extra->size = extra_len;
}
/* bin stats */
sum = total = 0;
for (n = 0; n < nbins; n++) {
binHistoFull[ bincntBaseAddr[n] ]++;
sum += bincntBaseAddr[n];
total += BIN_DEPTH;
}
avgFillFull = sum / (float) total;
sum = total = 0;
for (k = 0; k < binDim.z - 2*c; k++) {
for (j = 0; j < binDim.y - 2*c; j++) {
for (i = 0; i < binDim.x - 2*c; i++) {
int index = (k * binDim.y + j) * binDim.x + i;
binHistoCover[ bincntZeroAddr[index] ]++;
sum += bincntZeroAddr[index];
total += BIN_DEPTH;
}
}
}
avgFillCover = sum / (float) total;
if (verbose) {
/* report */
printf("number of atoms = %d\n", natoms);
printf("lattice spacing = %g\n", h);
printf("cutoff distance = %g\n", cutoff);
printf("\n");
printf("requested lattice dimensions = %d %d %d\n", nx, ny, nz);
printf("requested space dimensions = %g %g %g\n", nx*h, ny*h, nz*h);
printf("expanded lattice dimensions = %d %d %d\n", lnx, lny, lnz);
printf("expanded space dimensions = %g %g %g\n", lnx*h, lny*h, lnz*h);
printf("number of bytes for lattice data = %u\n", lnall*sizeof(float));
printf("\n");
printf("bin padding thickness = %d\n", c);
printf("bin cover dimensions = %d %d %d\n",
binDim.x - 2*c, binDim.y - 2*c, binDim.z - 2*c);
printf("bin full dimensions = %d %d %d\n", binDim.x, binDim.y, binDim.z);
printf("number of bins = %d\n", nbins);
printf("total number of atom slots = %d\n", nbins * BIN_DEPTH);
printf("%% overhead space = %g\n",
(natoms / (double) (nbins * BIN_DEPTH)) * 100);
printf("number of bytes for bin data = %u\n",
nbins * BIN_DEPTH * sizeof(float4));
printf("\n");
printf("bin histogram with padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoFull[n]);
sum += binHistoFull[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillFull * 100);
printf("\n");
printf("bin histogram excluding padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoCover[n]);
sum += binHistoCover[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillCover * 100);
printf("\n");
printf("number of extra atoms = %d\n", extra->size);
printf("%% atoms that are extra = %g\n", (extra->size / (double) natoms) * 100);
printf("\n");
/* sanity check on bins */
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoFull[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram with edges: "
"sum + others = %d\n", sum);
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoCover[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram excluding edges: "
"sum + others = %d\n", sum);
printf("\n");
/* neighbor list */
printf("neighbor list length = %d\n", nbrlistlen);
printf("\n");
}
/* setup CUDA kernel parameters */
gridDim.x = xRegionDim;
gridDim.y = yRegionDim;
gridDim.z = 1;
blockDim.x = 8;
blockDim.y = 2;
blockDim.z = 8;
/* allocate and initialize memory on CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
if (verbose) {
printf("Allocating %.2fMB on CUDA device for potentials\n",
lnall * sizeof(float) / (double) (1024*1024));
}
cudaMalloc((void **) ®ionZeroCuda, lnall * sizeof(ener_t));
CUERR;
cudaMemset(regionZeroCuda, 0, lnall * sizeof(ener_t));
CUERR;
if (verbose) {
printf("Allocating %.2fMB on CUDA device for atom bins\n",
nbins * BIN_DEPTH * sizeof(float4) / (double) (1024*1024));
}
cudaMalloc((void **) &binBaseCuda, nbins * BIN_DEPTH * sizeof(float4));
CUERR;
cudaMemcpy(binBaseCuda, binBaseAddr, nbins * BIN_DEPTH * sizeof(float4),
cudaMemcpyHostToDevice);
CUERR;
binZeroCuda = binBaseCuda + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
cudaMemcpyToSymbol(NbrListLen, &nbrlistlen, sizeof(int), 0);
CUERR;
cudaMemcpyToSymbol(NbrList, nbrlist, nbrlistlen * sizeof(int3), 0);
CUERR;
if (verbose)
printf("\n");
cudaStream_t cutoffstream;
cudaStreamCreate(&cutoffstream);
/* loop over z-dimension, invoke CUDA kernel for each x-y plane */
pb_SwitchToTimer(timers, pb_TimerID_KERNEL);
printf("Invoking CUDA kernel on %d region planes...\n", zRegionDim);
for (zRegionIndex = 0; zRegionIndex < zRegionDim; zRegionIndex++) {
printf(" computing plane %d\r", zRegionIndex);
fflush(stdout);
    cuda_cutoff_potential_lattice6overlap<<<gridDim, blockDim, 0, cutoffstream>>>(binDim.x, binDim.y,
        binZeroCuda, h, cutoff2, inv_cutoff2, regionZeroCuda, zRegionIndex);
}
/*
* handle extra atoms on the CPU, concurrently with the GPU calculations
*/
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
if (extra->size > 0) {
printf("computing extra atoms on CPU\n");
if (cpu_compute_cutoff_potential_lattice(lattice, cutoff, extra)) {
fprintf(stderr, "cpu_compute_cutoff_potential_lattice() failed "
"for extra atoms\n");
return -1;
}
printf("\n");
}
cudaStreamSynchronize(cutoffstream);
CUERR;
cudaThreadSynchronize();
cudaStreamDestroy(cutoffstream);
printf("Finished CUDA kernel calls \n");
/* copy result regions from CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
cudaMemcpy(regionZeroAddr, regionZeroCuda, lnall * sizeof(ener_t),
cudaMemcpyDeviceToHost);
CUERR;
/* free CUDA memory allocations */
cudaFree(regionZeroCuda);
cudaFree(binBaseCuda);
/*
* transpose on CPU, updating, producing the final lattice
*/
/* transpose regions back into lattice */
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
for (k = 0; k < nz; k++) {
zRegionIndex = (k >> 3);
zOffset = (k & 7);
for (j = 0; j < ny; j++) {
yRegionIndex = (j >> 3);
yOffset = (j & 7);
for (i = 0; i < nx; i++) {
xRegionIndex = (i >> 3);
xOffset = (i & 7);
thisRegion = regionZeroAddr
+ ((zRegionIndex * yRegionDim + yRegionIndex) * xRegionDim
+ xRegionIndex) * REGION_SIZE;
indexRegion = (zOffset * 8 + yOffset) * 8 + xOffset;
index = (k * ny + j) * nx + i;
#ifndef NEIGHBOR_COUNT
lattice->lattice[index] += thisRegion[indexRegion];
#else
neighbor_count += thisRegion[indexRegion];
#endif
}
}
}
#ifdef NEIGHBOR_COUNT
printf("Neighbor count: %f\n", (float)neighbor_count);
#endif
/* cleanup memory allocations */
free(regionZeroAddr);
free(binBaseAddr);
free(bincntBaseAddr);
free_atom(extra);
return 0;
}
|
c3c1818c96189cba06f92b862890c04d0db72bf0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by marco on 22/05/20.
//
#include "K_means_Cuda.cuh"
#include <cmath>
#include <iostream>
__constant__ short constK;
__constant__ int constNumPoint;
__constant__ short constDimPoint;
void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, hipError_t err) {
if (err == hipSuccess) {
return;
}
std::cerr << statement << " returned " << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl;
exit(1);
}
void print_device(double *device, int row, int col){
double *host;
host = (double *) malloc(row * col * sizeof(double));
hipMemcpy(host, device, row * col * sizeof(double),hipMemcpyDeviceToHost);
for (auto i = 0; i < row; i++) {
for (auto j = 0; j < col; j++) {
std::cout <<"- "<< host[i * col + j] << " ";
}
std::cout << "-" << std::endl;
}
    std::cout << std::endl;
    free(host);
}
void print_device(short *device, int row, int col){
short *host;
host = (short *) malloc(row * col * sizeof(short));
hipMemcpy(host, device, row * col * sizeof(short),hipMemcpyDeviceToHost);
for (auto i = 0; i < row; i++) {
for (auto j = 0; j < col; j++) {
std::cout <<"- "<< host[i * col + j] << " ";
}
std::cout << "-" << std::endl;
}
    std::cout << std::endl;
    free(host);
}
void print_device(int *device, int row, int col){
int *host;
host = (int *) malloc(row * col * sizeof(int));
hipMemcpy(host, device, row * col * sizeof(int),hipMemcpyDeviceToHost);
for (auto i = 0; i < row; i++) {
for (auto j = 0; j < col; j++) {
std::cout <<"- "<< host[i * col + j] << " ";
}
std::cout << "-" << std::endl;
}
    std::cout << std::endl;
    free(host);
}
/*
//INIZIALIZE CENDROID ASSIGNEMENT TO ZERO FOR ALL POINT'S DATASETS
//Assegno ogni punto al cluster -1
__global__
void initialize_assignment(short * deviceAssignment){
unsigned int threadId = (blockDim.x * blockIdx.x) + threadIdx.x;
if (threadId < constNumPoint){
//printf("STAMPA DEL DEVICEASSIGNEMENT [%d] \n",deviceAssignment[threadId]);
deviceAssignment[threadId] = -1;
}
}
*/
__device__ double doubleAtomicAdd(double*address, double val){
auto *address_as_ull = (unsigned long long int *) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double((long long int)assumed)));
} while (assumed != old);
return __longlong_as_double((long long int)old);
}
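// Note: the atomicCAS loop above emulates a double-precision atomicAdd by retrying
// on the value's 64-bit integer representation, for devices without native double
// atomicAdd support (compute capability < 6.0). It is used below e.g. as:
//   doubleAtomicAdd(&deviceCentroids[clusterId*constDimPoint + col],
//                   deviceDataset[row*constDimPoint + col]);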
__host__
bool checkEqualAssignment(const short * hostOldAssignment, const short * hostAssignment, const int numPoint){
for (auto i = 0; i < numPoint; i++){
if(hostOldAssignment[i] != hostAssignment[i]){
return false;
}
}
return true;
}
__global__
void compute_distances(const double * deviceDataset, const double * deviceCentroids, double * deviceDistances){
double distance = 0;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row < constNumPoint && col < constK){
for (int i = 0; i < constDimPoint; i++) {
distance += pow(deviceDataset[row*constDimPoint+i] - deviceCentroids[col*constDimPoint+i], 2);
}
deviceDistances[row*constK+col] = sqrt(distance);
}
}
__global__
void point_assignment(const double *deviceDistances, short *deviceAssignment){
unsigned int threadId = (blockDim.x * blockIdx.x) + threadIdx.x;
double min = INFINITY;
    short clusterLabel = 0;
double distance;
if (threadId < constNumPoint){
for (auto i = 0; i < constK; i++){
distance = deviceDistances[threadId*constK + i];
if(distance < min){
min = distance;
clusterLabel = i;
}
}
deviceAssignment[threadId] = clusterLabel;
}
}
__global__
void initialize_centroids(double * deviceCentroids){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < constDimPoint && row < constK){
deviceCentroids[row*constDimPoint + col] = 0;
}
}
//Original compute sum with 2D grid (better with dataset with too much dimensions)
__global__
void compute_sum(const double *deviceDataset, double * deviceCentroids, const short *deviceAssignment, int * deviceCount){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < constDimPoint && row < constNumPoint){
short clusterId = deviceAssignment[row];
doubleAtomicAdd(&deviceCentroids[clusterId*constDimPoint +col], deviceDataset[row*constDimPoint +col]);
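        // deviceCount[clusterId] is bumped once per (point, dimension) pair here, so it
        // ends up holding point-count * constDimPoint; update_centroids divides it back
        // down by constDimPoint before computing the mean.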
atomicAdd(&deviceCount[clusterId], 1);
}
}
//compute sum with 1D grid and iterate on point's dimensions
__global__
void compute_sum2(const double *deviceDataset, double * deviceCentroids, const short *deviceAssignment, int * deviceCount){
unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < constNumPoint){
short clusterId = deviceAssignment[row];
for (auto i = 0; i< constDimPoint; i++){
doubleAtomicAdd(&deviceCentroids[clusterId*constDimPoint+i], deviceDataset[row*constDimPoint+i]);
}
atomicAdd(&deviceCount[clusterId], 1);
}
}
//Update centroids with 2D grid (better with dataset with too much dimensions)
__global__
void update_centroids(double * deviceCentroids, const int * deviceCount){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < constDimPoint && row < constK) {
deviceCentroids[row * constDimPoint + col] = deviceCentroids[row * constDimPoint + col] / (double(deviceCount[row])/constDimPoint);
}
}
//Update centroids with 1D grid (no need to divide count for point's dimensions)
__global__
void update_centroids2(double * deviceCentroids, const int * deviceCount){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < constDimPoint && row < constK) {
deviceCentroids[row * constDimPoint + col] /= deviceCount[row];
}
}
__host__
std::tuple<double *, short *>cuda_KMeans(double * deviceDataset, double * deviceCentroids, const int numPoint, const short k, const short dimPoint){
//int c = 0;
dim3 dimBlockDistance(2, 512, 1);
dim3 dimGridDistance(ceil(k/2.0), ceil(numPoint/512.0), 1);
dim3 dimBlockInitialize(16, 16, 1);
dim3 dimGridInitialize(ceil(dimPoint / 16.0), ceil(k / 16.0), 1);
dim3 dimBlockComputeSum(2, 512, 1);
dim3 dimGridComputeSum(ceil(dimPoint / 2.0), ceil(numPoint / 512.0), 1);
dim3 dimBlockUpdateCentroids(16, 16, 1);
dim3 dimGridUpdateCentroids(ceil(dimPoint / 16.0), ceil(k / 16.0), 1);
CUDA_CHECK_RETURN(hipMemcpyToSymbol(constK, &k, sizeof(short)));
CUDA_CHECK_RETURN(hipMemcpyToSymbol(constNumPoint, &numPoint, sizeof(int)));
CUDA_CHECK_RETURN(hipMemcpyToSymbol(constDimPoint, &dimPoint, sizeof(short)));
//constant_dimPoint = dimPoint;
bool convergence = false;
    short * hostOldAssignment;
    hostOldAssignment = (short *) malloc(numPoint * sizeof(short));
    // Initialize to an impossible cluster label so the first convergence check
    // cannot spuriously succeed on uninitialized memory.
    for (auto i = 0; i < numPoint; i++) hostOldAssignment[i] = -1;
short * hostAssignment;
hostAssignment = (short *) malloc(numPoint * sizeof(short));
short * deviceAssignment;
CUDA_CHECK_RETURN(hipMalloc((void **) &deviceAssignment, numPoint*sizeof(short)));
double * deviceDistances;
CUDA_CHECK_RETURN(hipMalloc((void**) &deviceDistances, numPoint*k*sizeof(double)));
int * deviceCount;
CUDA_CHECK_RETURN(hipMalloc((void**) &deviceCount, k*sizeof(int)));
while (!convergence){
//ASSIGNMENT
//Find the nearest centroid and assign the point to that cluster
hipLaunchKernelGGL(( compute_distances), dim3(dimGridDistance), dim3(dimBlockDistance), 0, 0, deviceDataset, deviceCentroids, deviceDistances);
hipDeviceSynchronize();
hipLaunchKernelGGL(( point_assignment), dim3(ceil(numPoint/1024.0)), dim3(1024), 0, 0, deviceDistances, deviceAssignment);
hipDeviceSynchronize();
//CENTROIDS UPDATE
//Initialize centroids to 0 and set count to 0 (for compute means)
hipLaunchKernelGGL(( initialize_centroids), dim3(dimGridInitialize), dim3(dimBlockInitialize), 0, 0, deviceCentroids);
//print_device(deviceCentroids, k, dimPoint);
CUDA_CHECK_RETURN(hipMemset(deviceCount, 0, k*sizeof(int)));
//print_device(deviceCount, k, 1);
hipDeviceSynchronize();
//Compute all sum for centroids
hipLaunchKernelGGL(( compute_sum), dim3(dimGridComputeSum),dim3(dimBlockComputeSum), 0, 0, deviceDataset, deviceCentroids, deviceAssignment, deviceCount);
//compute_sum2<<<ceil(numPoint/1024.0), 1024>>>(deviceDataset, deviceCentroids, deviceAssignment, deviceCount);
hipDeviceSynchronize();
//printf("\n STAMPA DI TEST \n");
//print_device(deviceCentroids, k, dimPoint);
//printf("\n");
//Compute mean: division for count
hipLaunchKernelGGL(( update_centroids), dim3(dimGridUpdateCentroids),dim3(dimBlockUpdateCentroids), 0, 0, deviceCentroids,deviceCount);
//update_centroids2<<<dimGridUpdateCentroids,dimBlockUpdateCentroids>>>(deviceCentroids,deviceCount);
hipDeviceSynchronize();
CUDA_CHECK_RETURN(hipMemcpy(hostAssignment, deviceAssignment, numPoint*sizeof(short), hipMemcpyDeviceToHost));
//c ++;
if (checkEqualAssignment(hostOldAssignment, hostAssignment, numPoint)){
convergence = true;
}
else{
CUDA_CHECK_RETURN(hipMemcpy(hostOldAssignment, deviceAssignment, numPoint*sizeof(short), hipMemcpyDeviceToHost));
}
//printf("\n");
}
//std::cout << "Numero di iterazioni: " << c << " \n";
return{deviceCentroids, hostAssignment};
}
| c3c1818c96189cba06f92b862890c04d0db72bf0.cu | //
// Created by marco on 22/05/20.
//
#include "K_means_Cuda.cuh"
#include <cmath>
#include <iostream>
__constant__ short constK;
__constant__ int constNumPoint;
__constant__ short constDimPoint;
void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) {
if (err == cudaSuccess) {
return;
}
std::cerr << statement << " returned " << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl;
exit(1);
}
void print_device(double *device, int row, int col){
double *host;
host = (double *) malloc(row * col * sizeof(double));
cudaMemcpy(host, device, row * col * sizeof(double),cudaMemcpyDeviceToHost);
for (auto i = 0; i < row; i++) {
for (auto j = 0; j < col; j++) {
std::cout <<"- "<< host[i * col + j] << " ";
}
std::cout << "-" << std::endl;
}
    std::cout << std::endl;
    free(host);
}
void print_device(short *device, int row, int col){
short *host;
host = (short *) malloc(row * col * sizeof(short));
cudaMemcpy(host, device, row * col * sizeof(short),cudaMemcpyDeviceToHost);
for (auto i = 0; i < row; i++) {
for (auto j = 0; j < col; j++) {
std::cout <<"- "<< host[i * col + j] << " ";
}
std::cout << "-" << std::endl;
}
    std::cout << std::endl;
    free(host);
}
void print_device(int *device, int row, int col){
int *host;
host = (int *) malloc(row * col * sizeof(int));
cudaMemcpy(host, device, row * col * sizeof(int),cudaMemcpyDeviceToHost);
for (auto i = 0; i < row; i++) {
for (auto j = 0; j < col; j++) {
std::cout <<"- "<< host[i * col + j] << " ";
}
std::cout << "-" << std::endl;
}
    std::cout << std::endl;
    free(host);
}
/*
//INIZIALIZE CENDROID ASSIGNEMENT TO ZERO FOR ALL POINT'S DATASETS
//Assegno ogni punto al cluster -1
__global__
void initialize_assignment(short * deviceAssignment){
unsigned int threadId = (blockDim.x * blockIdx.x) + threadIdx.x;
if (threadId < constNumPoint){
//printf("STAMPA DEL DEVICEASSIGNEMENT [%d] \n",deviceAssignment[threadId]);
deviceAssignment[threadId] = -1;
}
}
*/
__device__ double doubleAtomicAdd(double*address, double val){
auto *address_as_ull = (unsigned long long int *) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double((long long int)assumed)));
} while (assumed != old);
return __longlong_as_double((long long int)old);
}
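// Note: the atomicCAS loop above emulates a double-precision atomicAdd by retrying
// on the value's 64-bit integer representation, for devices without native double
// atomicAdd support (compute capability < 6.0). It is used below e.g. as:
//   doubleAtomicAdd(&deviceCentroids[clusterId*constDimPoint + col],
//                   deviceDataset[row*constDimPoint + col]);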
__host__
bool checkEqualAssignment(const short * hostOldAssignment, const short * hostAssignment, const int numPoint){
for (auto i = 0; i < numPoint; i++){
if(hostOldAssignment[i] != hostAssignment[i]){
return false;
}
}
return true;
}
__global__
void compute_distances(const double * deviceDataset, const double * deviceCentroids, double * deviceDistances){
double distance = 0;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row < constNumPoint && col < constK){
for (int i = 0; i < constDimPoint; i++) {
distance += pow(deviceDataset[row*constDimPoint+i] - deviceCentroids[col*constDimPoint+i], 2);
}
deviceDistances[row*constK+col] = sqrt(distance);
}
}
__global__
void point_assignment(const double *deviceDistances, short *deviceAssignment){
unsigned int threadId = (blockDim.x * blockIdx.x) + threadIdx.x;
double min = INFINITY;
    short clusterLabel = 0;
double distance;
if (threadId < constNumPoint){
for (auto i = 0; i < constK; i++){
distance = deviceDistances[threadId*constK + i];
if(distance < min){
min = distance;
clusterLabel = i;
}
}
deviceAssignment[threadId] = clusterLabel;
}
}
__global__
void initialize_centroids(double * deviceCentroids){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < constDimPoint && row < constK){
deviceCentroids[row*constDimPoint + col] = 0;
}
}
//Original compute sum with 2D grid (better with dataset with too much dimensions)
__global__
void compute_sum(const double *deviceDataset, double * deviceCentroids, const short *deviceAssignment, int * deviceCount){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < constDimPoint && row < constNumPoint){
short clusterId = deviceAssignment[row];
doubleAtomicAdd(&deviceCentroids[clusterId*constDimPoint +col], deviceDataset[row*constDimPoint +col]);
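        // deviceCount[clusterId] is bumped once per (point, dimension) pair here, so it
        // ends up holding point-count * constDimPoint; update_centroids divides it back
        // down by constDimPoint before computing the mean.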
atomicAdd(&deviceCount[clusterId], 1);
}
}
//compute sum with 1D grid and iterate on point's dimensions
__global__
void compute_sum2(const double *deviceDataset, double * deviceCentroids, const short *deviceAssignment, int * deviceCount){
unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < constNumPoint){
short clusterId = deviceAssignment[row];
for (auto i = 0; i< constDimPoint; i++){
doubleAtomicAdd(&deviceCentroids[clusterId*constDimPoint+i], deviceDataset[row*constDimPoint+i]);
}
atomicAdd(&deviceCount[clusterId], 1);
}
}
//Update centroids with 2D grid (better with dataset with too much dimensions)
__global__
void update_centroids(double * deviceCentroids, const int * deviceCount){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < constDimPoint && row < constK) {
deviceCentroids[row * constDimPoint + col] = deviceCentroids[row * constDimPoint + col] / (double(deviceCount[row])/constDimPoint);
}
}
//Update centroids with 1D grid (no need to divide count for point's dimensions)
__global__
void update_centroids2(double * deviceCentroids, const int * deviceCount){
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < constDimPoint && row < constK) {
deviceCentroids[row * constDimPoint + col] /= deviceCount[row];
}
}
__host__
std::tuple<double *, short *>cuda_KMeans(double * deviceDataset, double * deviceCentroids, const int numPoint, const short k, const short dimPoint){
//int c = 0;
dim3 dimBlockDistance(2, 512, 1);
dim3 dimGridDistance(ceil(k/2.0), ceil(numPoint/512.0), 1);
dim3 dimBlockInitialize(16, 16, 1);
dim3 dimGridInitialize(ceil(dimPoint / 16.0), ceil(k / 16.0), 1);
dim3 dimBlockComputeSum(2, 512, 1);
dim3 dimGridComputeSum(ceil(dimPoint / 2.0), ceil(numPoint / 512.0), 1);
dim3 dimBlockUpdateCentroids(16, 16, 1);
dim3 dimGridUpdateCentroids(ceil(dimPoint / 16.0), ceil(k / 16.0), 1);
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(constK, &k, sizeof(short)));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(constNumPoint, &numPoint, sizeof(int)));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(constDimPoint, &dimPoint, sizeof(short)));
//constant_dimPoint = dimPoint;
bool convergence = false;
    short * hostOldAssignment;
    hostOldAssignment = (short *) malloc(numPoint * sizeof(short));
    // Initialize to an impossible cluster label so the first convergence check
    // cannot spuriously succeed on uninitialized memory.
    for (auto i = 0; i < numPoint; i++) hostOldAssignment[i] = -1;
short * hostAssignment;
hostAssignment = (short *) malloc(numPoint * sizeof(short));
short * deviceAssignment;
CUDA_CHECK_RETURN(cudaMalloc((void **) &deviceAssignment, numPoint*sizeof(short)));
double * deviceDistances;
CUDA_CHECK_RETURN(cudaMalloc((void**) &deviceDistances, numPoint*k*sizeof(double)));
int * deviceCount;
CUDA_CHECK_RETURN(cudaMalloc((void**) &deviceCount, k*sizeof(int)));
while (!convergence){
//ASSIGNMENT
//Find the nearest centroid and assign the point to that cluster
compute_distances<<<dimGridDistance, dimBlockDistance>>>(deviceDataset, deviceCentroids, deviceDistances);
cudaDeviceSynchronize();
point_assignment<<<ceil(numPoint/1024.0), 1024>>>(deviceDistances, deviceAssignment);
cudaDeviceSynchronize();
//CENTROIDS UPDATE
//Initialize centroids to 0 and set count to 0 (for compute means)
initialize_centroids<<<dimGridInitialize, dimBlockInitialize>>>(deviceCentroids);
//print_device(deviceCentroids, k, dimPoint);
CUDA_CHECK_RETURN(cudaMemset(deviceCount, 0, k*sizeof(int)));
//print_device(deviceCount, k, 1);
cudaDeviceSynchronize();
//Compute all sum for centroids
compute_sum<<<dimGridComputeSum,dimBlockComputeSum>>>(deviceDataset, deviceCentroids, deviceAssignment, deviceCount);
//compute_sum2<<<ceil(numPoint/1024.0), 1024>>>(deviceDataset, deviceCentroids, deviceAssignment, deviceCount);
cudaDeviceSynchronize();
//printf("\n STAMPA DI TEST \n");
//print_device(deviceCentroids, k, dimPoint);
//printf("\n");
//Compute mean: division for count
update_centroids<<<dimGridUpdateCentroids,dimBlockUpdateCentroids>>>(deviceCentroids,deviceCount);
//update_centroids2<<<dimGridUpdateCentroids,dimBlockUpdateCentroids>>>(deviceCentroids,deviceCount);
cudaDeviceSynchronize();
CUDA_CHECK_RETURN(cudaMemcpy(hostAssignment, deviceAssignment, numPoint*sizeof(short), cudaMemcpyDeviceToHost));
//c ++;
if (checkEqualAssignment(hostOldAssignment, hostAssignment, numPoint)){
convergence = true;
}
else{
CUDA_CHECK_RETURN(cudaMemcpy(hostOldAssignment, deviceAssignment, numPoint*sizeof(short), cudaMemcpyDeviceToHost));
}
//printf("\n");
}
//std::cout << "Numero di iterazioni: " << c << " \n";
return{deviceCentroids, hostAssignment};
}
|
09dff7ee194675350d9e5f27d818ae3f65ae12ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>
#include <vector>
using namespace std;
const int INF = 10000000;
const int V = 10010;
const int MAX_THREAD_DIM2 = 32;
void input(char *inFileName, int B);
void output(char *outFileName);
void block_FW_2GPU(int B);
int ceil(int a, int b);
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
int realn;
int n, m; // Number of vertices, edges
int* Dist; // n * n, on host
int* dDist[2]; // n * n, on device
int streamSize[2];
vector<hipStream_t> streams[2];
int getGPUId ()
{
int gpuId;
hipGetDevice(&gpuId);
return gpuId;
}
hipStream_t getIdleStream (int gpuId)
{
hipSetDevice(gpuId);
if(streams[gpuId].size() == streamSize[gpuId])
{
hipStream_t stm;
hipStreamCreate(&stm);
streams[gpuId].push_back(stm);
streamSize[gpuId]++;
return stm;
}
else
return streams[gpuId][streamSize[gpuId]++];
}
void syncAllStreams ()
{
hipDeviceSynchronize();
streamSize[0] = 0;
streamSize[1] = 0;
}
void blockCopyAsync (int gpuId, int* dst, const int* src, hipMemcpyKind kind, hipStream_t stream, int B, int bi0, int bi1, int bj0, int bj1)
{
hipSetDevice(gpuId);
for(int i = bi0 * B; i < bi1 * B; ++i)
{
int offset = i * n + bj0 * B;
int size = (bj1 - bj0) * B * sizeof(int);
hipMemcpyAsync(dst + offset, src + offset, size, kind, stream);
}
}
int main(int argc, char* argv[])
{
int B = atoi(argv[3]);
input(argv[1], B);
block_FW_2GPU(B);
output(argv[2]);
return 0;
}
void input(char *inFileName, int B)
{
FILE *infile = fopen(inFileName, "r");
fscanf(infile, "%d %d", &realn, &m);
n = ceil(realn, B) * B;
hipMallocManaged(&Dist, n * n * sizeof(int));
for (int i = 0, k = 0; i < n; ++i) {
for (int j = 0; j < n; ++j, ++k) {
if (i == j) Dist[k] = 0;
else Dist[k] = INF;
}
}
while (--m >= 0) {
int a, b, v;
fscanf(infile, "%d %d %d", &a, &b, &v);
--a, --b;
Dist[a * n + b] = v;
}
}
void output(char *outFileName)
{
FILE *outfile = fopen(outFileName, "w");
for (int i = 0; i < realn; ++i) {
for (int j = 0; j < realn; ++j) {
int d = Dist[i * n + j];
if (d >= INF) fprintf(outfile, "INF ");
else fprintf(outfile, "%d ", d);
}
fprintf(outfile, "\n");
}
hipFree(Dist);
}
void print ()
{
for (int i = 0; i < realn; ++i) {
for (int j = 0; j < realn; ++j) {
int d = Dist[i * n + j];
if (d >= INF) fprintf(stderr, "INF ");
else fprintf(stderr, "%d ", d);
}
fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
int ceil(int a, int b)
{
return (a + b -1)/b;
}
void block_FW_2GPU(int B)
{
int round = ceil(n, B);
for (int r = 0; r < round; ++r) {
/* Phase 1*/
fprintf(stderr, "Round: %d\n", r);
calAsync(0, B, r, r, r, 1, 1);
syncAllStreams();
/* Phase 2*/
calAsync(0, B, r, r, 0, r, 1); // L 0
calAsync(0, B, r, r, r +1, round - r -1, 1); // R 0
calAsync(1, B, r, 0, r, 1, r); // U 1
calAsync(1, B, r, r +1, r, 1, round - r -1); // D 1
syncAllStreams();
/* Phase 3*/
calAsync(0, B, r, 0, 0, r, r); // <^
calAsync(1, B, r, 0, r +1, round -r -1, r); // ^>
calAsync(1, B, r, r +1, 0, r, round - r -1); // <v
calAsync(0, B, r, r +1, r +1, round -r -1, round - r -1); // v>
syncAllStreams();
}
}
__global__
void Update (int k, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int i = blockDim.x * blockIdx.x + threadIdx.x + i0;
int j = blockDim.y * blockIdx.y + threadIdx.y + j0;
if(i >= i1 || j >= j1)
return;
int Dik = D(i, k);
int Dkj = D(k, j);
int D1 = Dik + Dkj;
if (D1 < D(i, j))
D(i, j) = D1;
}
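// UpdateIndependent below is used for blocks that share neither a row nor a column
// with the pivot block of this round, so the whole k-range [k0, k1) can be processed
// in one launch: each thread keeps its D(i,j) in a register while the block stages
// MAX_THREAD_DIM2-wide strips of D(i,k) and D(k,j) in shared memory (Si/Sj),
// refilled whenever dkmod wraps back to 0.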
__global__
void UpdateIndependent (int k0, int k1, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int tx = threadIdx.x;
int ty = threadIdx.y;
int di = blockDim.x * blockIdx.x + tx;
int dj = blockDim.y * blockIdx.y + ty;
int i = i0 + di;
int j = j0 + dj;
bool valid = i < i1 && j < j1;
__shared__ int Si[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
__shared__ int Sj[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
const int cacheSize = MAX_THREAD_DIM2;
int Dij = valid? D(i, j): 0;
int dkmod = 0;
for(int k = k0; k < k1; ++k)
{
if(dkmod == 0)
{
__syncthreads();
if(i < i1 && k+ty < k1)
Si[ty][tx] = D(i, k+ty);
if(j < j1 && k+tx < k1)
Sj[tx][ty] = D(k+tx, j);
__syncthreads();
}
if(valid)
{
            // assert(Si[dkmod][tx] == D(i,k));
// assert(Sj[dkmod][ty] == D(k,j));
// int Dik = D(i, k);
// int Dkj = D(k, j);
int Dik = Si[dkmod][tx];
int Dkj = Sj[dkmod][ty];
int D1 = Dik + Dkj;
if (D1 < Dij)
Dij = D1;
}
dkmod = (dkmod + 1) % cacheSize;
}
if(valid)
D(i, j) = Dij;
}
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height)
{
hipSetDevice(gpuId);
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it need to compute B times
// for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int i0 = b_i * B;
int i1 = min((b_i +1) * B, n);
int j0 = b_j * B;
int j1 = min((b_j +1) * B, n);
int k0 = Round * B;
int k1 = min((Round +1) * B, n);
bool iDepends = i0 == k0;
bool jDepends = j0 == k0;
int threadDim = MAX_THREAD_DIM2;//::min(B, MAX_THREAD_DIM2);
int blockDim = (B + MAX_THREAD_DIM2 - 1) / MAX_THREAD_DIM2;
dim3 grid(blockDim, blockDim), block(threadDim, threadDim);
hipStream_t stm = getIdleStream(gpuId);
if(iDepends || jDepends)
{
for(int k=k0; k<k1; ++k)
hipLaunchKernelGGL(( Update), dim3(grid), dim3(block), 0, stm, k, i0, j0, i1, j1, Dist, n);
}
else
hipLaunchKernelGGL(( UpdateIndependent), dim3(grid), dim3(block), 0, stm, k0, k1, i0, j0, i1, j1, Dist, n);
// for (int i = i0; i < i1; ++i) {
// for (int j = j0; j < j1; ++j) {
// if (Dist[i][k] + Dist[k][j] < Dist[i][j])
// Dist[i][j] = Dist[i][k] + Dist[k][j];
// }
// }
// }
}
}
}
| 09dff7ee194675350d9e5f27d818ae3f65ae12ef.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>
#include <vector>
using namespace std;
const int INF = 10000000;
const int V = 10010;
const int MAX_THREAD_DIM2 = 32;
void input(char *inFileName, int B);
void output(char *outFileName);
void block_FW_2GPU(int B);
int ceil(int a, int b);
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
int realn;
int n, m; // Number of vertices, edges
int* Dist; // n * n, on host
int* dDist[2]; // n * n, on device
int streamSize[2];
vector<cudaStream_t> streams[2];
int getGPUId ()
{
int gpuId;
cudaGetDevice(&gpuId);
return gpuId;
}
cudaStream_t getIdleStream (int gpuId)
{
cudaSetDevice(gpuId);
if(streams[gpuId].size() == streamSize[gpuId])
{
cudaStream_t stm;
cudaStreamCreate(&stm);
streams[gpuId].push_back(stm);
streamSize[gpuId]++;
return stm;
}
else
return streams[gpuId][streamSize[gpuId]++];
}
void syncAllStreams ()
{
cudaThreadSynchronize();
streamSize[0] = 0;
streamSize[1] = 0;
}
void blockCopyAsync (int gpuId, int* dst, const int* src, cudaMemcpyKind kind, cudaStream_t stream, int B, int bi0, int bi1, int bj0, int bj1)
{
cudaSetDevice(gpuId);
for(int i = bi0 * B; i < bi1 * B; ++i)
{
int offset = i * n + bj0 * B;
int size = (bj1 - bj0) * B * sizeof(int);
cudaMemcpyAsync(dst + offset, src + offset, size, kind, stream);
}
}
int main(int argc, char* argv[])
{
int B = atoi(argv[3]);
input(argv[1], B);
block_FW_2GPU(B);
output(argv[2]);
return 0;
}
void input(char *inFileName, int B)
{
FILE *infile = fopen(inFileName, "r");
fscanf(infile, "%d %d", &realn, &m);
n = ceil(realn, B) * B;
cudaMallocManaged(&Dist, n * n * sizeof(int));
for (int i = 0, k = 0; i < n; ++i) {
for (int j = 0; j < n; ++j, ++k) {
if (i == j) Dist[k] = 0;
else Dist[k] = INF;
}
}
while (--m >= 0) {
int a, b, v;
fscanf(infile, "%d %d %d", &a, &b, &v);
--a, --b;
Dist[a * n + b] = v;
}
}
void output(char *outFileName)
{
FILE *outfile = fopen(outFileName, "w");
for (int i = 0; i < realn; ++i) {
for (int j = 0; j < realn; ++j) {
int d = Dist[i * n + j];
if (d >= INF) fprintf(outfile, "INF ");
else fprintf(outfile, "%d ", d);
}
fprintf(outfile, "\n");
}
cudaFree(Dist);
}
void print ()
{
for (int i = 0; i < realn; ++i) {
for (int j = 0; j < realn; ++j) {
int d = Dist[i * n + j];
if (d >= INF) fprintf(stderr, "INF ");
else fprintf(stderr, "%d ", d);
}
fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
int ceil(int a, int b)
{
return (a + b -1)/b;
}
void block_FW_2GPU(int B)
{
int round = ceil(n, B);
for (int r = 0; r < round; ++r) {
/* Phase 1*/
fprintf(stderr, "Round: %d\n", r);
calAsync(0, B, r, r, r, 1, 1);
syncAllStreams();
/* Phase 2*/
calAsync(0, B, r, r, 0, r, 1); // L 0
calAsync(0, B, r, r, r +1, round - r -1, 1); // R 0
calAsync(1, B, r, 0, r, 1, r); // U 1
calAsync(1, B, r, r +1, r, 1, round - r -1); // D 1
syncAllStreams();
/* Phase 3*/
calAsync(0, B, r, 0, 0, r, r); // <^
calAsync(1, B, r, 0, r +1, round -r -1, r); // ^>
calAsync(1, B, r, r +1, 0, r, round - r -1); // <v
calAsync(0, B, r, r +1, r +1, round -r -1, round - r -1); // v>
syncAllStreams();
}
}
__global__
void Update (int k, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int i = blockDim.x * blockIdx.x + threadIdx.x + i0;
int j = blockDim.y * blockIdx.y + threadIdx.y + j0;
if(i >= i1 || j >= j1)
return;
int Dik = D(i, k);
int Dkj = D(k, j);
int D1 = Dik + Dkj;
if (D1 < D(i, j))
D(i, j) = D1;
}
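// UpdateIndependent below is used for blocks that share neither a row nor a column
// with the pivot block of this round, so the whole k-range [k0, k1) can be processed
// in one launch: each thread keeps its D(i,j) in a register while the block stages
// MAX_THREAD_DIM2-wide strips of D(i,k) and D(k,j) in shared memory (Si/Sj),
// refilled whenever dkmod wraps back to 0.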
__global__
void UpdateIndependent (int k0, int k1, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int tx = threadIdx.x;
int ty = threadIdx.y;
int di = blockDim.x * blockIdx.x + tx;
int dj = blockDim.y * blockIdx.y + ty;
int i = i0 + di;
int j = j0 + dj;
bool valid = i < i1 && j < j1;
__shared__ int Si[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
__shared__ int Sj[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
const int cacheSize = MAX_THREAD_DIM2;
int Dij = valid? D(i, j): 0;
int dkmod = 0;
for(int k = k0; k < k1; ++k)
{
if(dkmod == 0)
{
__syncthreads();
if(i < i1 && k+ty < k1)
Si[ty][tx] = D(i, k+ty);
if(j < j1 && k+tx < k1)
Sj[tx][ty] = D(k+tx, j);
__syncthreads();
}
if(valid)
{
            // assert(Si[dkmod][tx] == D(i,k));
// assert(Sj[dkmod][ty] == D(k,j));
// int Dik = D(i, k);
// int Dkj = D(k, j);
int Dik = Si[dkmod][tx];
int Dkj = Sj[dkmod][ty];
int D1 = Dik + Dkj;
if (D1 < Dij)
Dij = D1;
}
dkmod = (dkmod + 1) % cacheSize;
}
if(valid)
D(i, j) = Dij;
}
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height)
{
cudaSetDevice(gpuId);
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it need to compute B times
// for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int i0 = b_i * B;
int i1 = min((b_i +1) * B, n);
int j0 = b_j * B;
int j1 = min((b_j +1) * B, n);
int k0 = Round * B;
int k1 = min((Round +1) * B, n);
bool iDepends = i0 == k0;
bool jDepends = j0 == k0;
int threadDim = MAX_THREAD_DIM2;//std::min(B, MAX_THREAD_DIM2);
int blockDim = (B + MAX_THREAD_DIM2 - 1) / MAX_THREAD_DIM2;
dim3 grid(blockDim, blockDim), block(threadDim, threadDim);
cudaStream_t stm = getIdleStream(gpuId);
if(iDepends || jDepends)
{
for(int k=k0; k<k1; ++k)
Update<<<grid, block, 0, stm>>>(k, i0, j0, i1, j1, Dist, n);
}
else
UpdateIndependent<<<grid, block, 0, stm>>>(k0, k1, i0, j0, i1, j1, Dist, n);
// for (int i = i0; i < i1; ++i) {
// for (int j = j0; j < j1; ++j) {
// if (Dist[i][k] + Dist[k][j] < Dist[i][j])
// Dist[i][j] = Dist[i][k] + Dist[k][j];
// }
// }
// }
}
}
}
|
9cf03e03f33c009fb198773802499fc847edd3b8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "property_generator.cuh"
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/mg_utilities.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <prims/property_op_utils.cuh>
#include <prims/reduce_v.cuh>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <cuco/hash_functions.cuh>
#include <raft/comms/mpi_comms.hpp>
#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/distance.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <gtest/gtest.h>
#include <random>
template <typename T>
struct result_compare {
static constexpr double threshold_ratio{1e-2};
constexpr auto operator()(const T& t1, const T& t2)
{
if constexpr (std::is_floating_point_v<T>) {
bool passed = (t1 == t2) // when t1 == t2 == 0
||
(std::abs(t1 - t2) < (::max(std::abs(t1), std::abs(t2)) * threshold_ratio));
return passed;
}
return t1 == t2;
}
};
template <typename... Args>
struct result_compare<thrust::tuple<Args...>> {
static constexpr double threshold_ratio{1e-3};
using Type = thrust::tuple<Args...>;
constexpr auto operator()(const Type& t1, const Type& t2)
{
return equality_impl(t1, t2, std::make_index_sequence<thrust::tuple_size<Type>::value>());
}
private:
template <typename T>
constexpr bool equal(T t1, T t2)
{
if constexpr (std::is_floating_point_v<T>) {
bool passed = (t1 == t2) // when t1 == t2 == 0
||
(std::abs(t1 - t2) < (::max(std::abs(t1), std::abs(t2)) * threshold_ratio));
return passed;
}
return t1 == t2;
}
template <typename T, std::size_t... I>
constexpr auto equality_impl(T& t1, T& t2, std::index_sequence<I...>)
{
return (... && (equal(thrust::get<I>(t1), thrust::get<I>(t2))));
}
};
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MGReduceV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MGReduceV() {}
static void SetUpTestCase() { handle_ = cugraph::test::initialize_mg_handle(); }
static void TearDownTestCase() { handle_.reset(); }
virtual void SetUp() {}
virtual void TearDown() {}
// Compare the results of reduce_v primitive and thrust reduce on a single GPU
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
HighResTimer hr_timer{};
// 1. create MG graph
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.start("MG Construct graph");
}
cugraph::graph_t<vertex_t, edge_t, store_transposed, true> mg_graph(*handle_);
std::optional<rmm::device_uvector<vertex_t>> mg_renumber_map{std::nullopt};
std::tie(mg_graph, std::ignore, mg_renumber_map) =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
*handle_, input_usecase, true, true);
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
auto mg_graph_view = mg_graph.view();
// 2. run MG reduce_v
const int hash_bin_count = 5;
const int initial_value = 10;
auto property_initial_value =
cugraph::test::generate<vertex_t, result_t>::initial_value(initial_value);
auto mg_vertex_prop = cugraph::test::generate<vertex_t, result_t>::vertex_property(
*handle_, (*mg_renumber_map), hash_bin_count);
auto property_iter = cugraph::get_dataframe_buffer_begin(mg_vertex_prop);
enum class reduction_type_t { PLUS, MINIMUM, MAXIMUM };
std::array<reduction_type_t, 3> reduction_types = {
reduction_type_t::PLUS, reduction_type_t::MINIMUM, reduction_type_t::MAXIMUM};
std::unordered_map<reduction_type_t, result_t> results;
for (auto reduction_type : reduction_types) {
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.start("MG reduce_v");
}
switch (reduction_type) {
case reduction_type_t::PLUS:
results[reduction_type] = reduce_v(*handle_,
mg_graph_view,
property_iter,
property_initial_value,
cugraph::reduce_op::plus<result_t>{});
break;
case reduction_type_t::MINIMUM:
results[reduction_type] = reduce_v(*handle_,
mg_graph_view,
property_iter,
property_initial_value,
cugraph::reduce_op::minimum<result_t>{});
break;
case reduction_type_t::MAXIMUM:
results[reduction_type] = reduce_v(*handle_,
mg_graph_view,
property_iter,
property_initial_value,
cugraph::reduce_op::maximum<result_t>{});
break;
default: FAIL() << "should not be reached.";
}
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
}
// 3. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::graph_t<vertex_t, edge_t, store_transposed, false> sg_graph(*handle_);
std::tie(sg_graph, std::ignore, std::ignore) = cugraph::test::mg_graph_to_sg_graph(
*handle_,
mg_graph_view,
std::optional<cugraph::edge_property_view_t<edge_t, weight_t const*>>{std::nullopt},
std::make_optional<raft::device_span<vertex_t const>>((*mg_renumber_map).data(),
(*mg_renumber_map).size()),
false);
if (handle_->get_comms().get_rank() == 0) {
auto sg_graph_view = sg_graph.view();
auto sg_vertex_prop = cugraph::test::generate<vertex_t, result_t>::vertex_property(
*handle_,
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()),
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_last()),
hash_bin_count);
auto sg_property_iter = cugraph::get_dataframe_buffer_begin(sg_vertex_prop);
for (auto reduction_type : reduction_types) {
result_t expected_result{};
switch (reduction_type) {
case reduction_type_t::PLUS:
expected_result = reduce_v(*handle_,
sg_graph_view,
sg_property_iter,
property_initial_value,
cugraph::reduce_op::plus<result_t>{});
break;
case reduction_type_t::MINIMUM:
expected_result = reduce_v(*handle_,
sg_graph_view,
sg_property_iter,
property_initial_value,
cugraph::reduce_op::minimum<result_t>{});
break;
case reduction_type_t::MAXIMUM:
expected_result = reduce_v(*handle_,
sg_graph_view,
sg_property_iter,
property_initial_value,
cugraph::reduce_op::maximum<result_t>{});
break;
default: FAIL() << "should not be reached.";
}
result_compare<result_t> compare{};
ASSERT_TRUE(compare(expected_result, results[reduction_type]));
}
}
}
}
private:
static std::unique_ptr<raft::handle_t> handle_;
};
template <typename input_usecase_t>
std::unique_ptr<raft::handle_t> Tests_MGReduceV<input_usecase_t>::handle_ = nullptr;
using Tests_MGReduceV_File = Tests_MGReduceV<cugraph::test::File_Usecase>;
using Tests_MGReduceV_Rmat = Tests_MGReduceV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MGReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int32_t, float>, false>(
std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int32_t, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int32_t, float>, true>(
std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int32_t, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int32_t, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int32_t, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int32_t, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt64Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int32_t, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int32_t, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int32_t, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int32_t, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt64Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int32_t, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MGReduceV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(rmat_small_test,
Tests_MGReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false))));
INSTANTIATE_TEST_SUITE_P(
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with
--gtest_filter to select only the rmat_benchmark_test with a specific
vertex & edge type combination) by command line arguments and do not
include more than one Rmat_Usecase that differ only in scale or edge
factor (to avoid running same benchmarks more than once) */
Tests_MGReduceV_Rmat,
::testing::Combine(
::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
| 9cf03e03f33c009fb198773802499fc847edd3b8.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "property_generator.cuh"
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/mg_utilities.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <prims/property_op_utils.cuh>
#include <prims/reduce_v.cuh>
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/utilities/high_res_timer.hpp>
#include <cuco/hash_functions.cuh>
#include <raft/comms/mpi_comms.hpp>
#include <raft/core/comms.hpp>
#include <raft/core/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/distance.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <gtest/gtest.h>
#include <random>
template <typename T>
struct result_compare {
static constexpr double threshold_ratio{1e-2};
constexpr auto operator()(const T& t1, const T& t2)
{
if constexpr (std::is_floating_point_v<T>) {
bool passed = (t1 == t2) // when t1 == t2 == 0
||
(std::abs(t1 - t2) < (std::max(std::abs(t1), std::abs(t2)) * threshold_ratio));
return passed;
}
return t1 == t2;
}
};
template <typename... Args>
struct result_compare<thrust::tuple<Args...>> {
static constexpr double threshold_ratio{1e-3};
using Type = thrust::tuple<Args...>;
constexpr auto operator()(const Type& t1, const Type& t2)
{
return equality_impl(t1, t2, std::make_index_sequence<thrust::tuple_size<Type>::value>());
}
private:
template <typename T>
constexpr bool equal(T t1, T t2)
{
if constexpr (std::is_floating_point_v<T>) {
bool passed = (t1 == t2) // when t1 == t2 == 0
||
(std::abs(t1 - t2) < (std::max(std::abs(t1), std::abs(t2)) * threshold_ratio));
return passed;
}
return t1 == t2;
}
template <typename T, std::size_t... I>
constexpr auto equality_impl(T& t1, T& t2, std::index_sequence<I...>)
{
return (... && (equal(thrust::get<I>(t1), thrust::get<I>(t2))));
}
};
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MGReduceV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MGReduceV() {}
static void SetUpTestCase() { handle_ = cugraph::test::initialize_mg_handle(); }
static void TearDownTestCase() { handle_.reset(); }
virtual void SetUp() {}
virtual void TearDown() {}
// Compare the results of reduce_v primitive and thrust reduce on a single GPU
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
HighResTimer hr_timer{};
// 1. create MG graph
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.start("MG Construct graph");
}
cugraph::graph_t<vertex_t, edge_t, store_transposed, true> mg_graph(*handle_);
std::optional<rmm::device_uvector<vertex_t>> mg_renumber_map{std::nullopt};
std::tie(mg_graph, std::ignore, mg_renumber_map) =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
*handle_, input_usecase, true, true);
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
auto mg_graph_view = mg_graph.view();
// 2. run MG reduce_v
const int hash_bin_count = 5;
const int initial_value = 10;
auto property_initial_value =
cugraph::test::generate<vertex_t, result_t>::initial_value(initial_value);
auto mg_vertex_prop = cugraph::test::generate<vertex_t, result_t>::vertex_property(
*handle_, (*mg_renumber_map), hash_bin_count);
auto property_iter = cugraph::get_dataframe_buffer_begin(mg_vertex_prop);
enum class reduction_type_t { PLUS, MINIMUM, MAXIMUM };
std::array<reduction_type_t, 3> reduction_types = {
reduction_type_t::PLUS, reduction_type_t::MINIMUM, reduction_type_t::MAXIMUM};
std::unordered_map<reduction_type_t, result_t> results;
for (auto reduction_type : reduction_types) {
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.start("MG reduce_v");
}
switch (reduction_type) {
case reduction_type_t::PLUS:
results[reduction_type] = reduce_v(*handle_,
mg_graph_view,
property_iter,
property_initial_value,
cugraph::reduce_op::plus<result_t>{});
break;
case reduction_type_t::MINIMUM:
results[reduction_type] = reduce_v(*handle_,
mg_graph_view,
property_iter,
property_initial_value,
cugraph::reduce_op::minimum<result_t>{});
break;
case reduction_type_t::MAXIMUM:
results[reduction_type] = reduce_v(*handle_,
mg_graph_view,
property_iter,
property_initial_value,
cugraph::reduce_op::maximum<result_t>{});
break;
default: FAIL() << "should not be reached.";
}
if (cugraph::test::g_perf) {
RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle_->get_comms().barrier();
hr_timer.stop();
hr_timer.display_and_clear(std::cout);
}
}
// 3. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::graph_t<vertex_t, edge_t, store_transposed, false> sg_graph(*handle_);
std::tie(sg_graph, std::ignore, std::ignore) = cugraph::test::mg_graph_to_sg_graph(
*handle_,
mg_graph_view,
std::optional<cugraph::edge_property_view_t<edge_t, weight_t const*>>{std::nullopt},
std::make_optional<raft::device_span<vertex_t const>>((*mg_renumber_map).data(),
(*mg_renumber_map).size()),
false);
if (handle_->get_comms().get_rank() == 0) {
auto sg_graph_view = sg_graph.view();
auto sg_vertex_prop = cugraph::test::generate<vertex_t, result_t>::vertex_property(
*handle_,
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_first()),
thrust::make_counting_iterator(sg_graph_view.local_vertex_partition_range_last()),
hash_bin_count);
auto sg_property_iter = cugraph::get_dataframe_buffer_begin(sg_vertex_prop);
for (auto reduction_type : reduction_types) {
result_t expected_result{};
switch (reduction_type) {
case reduction_type_t::PLUS:
expected_result = reduce_v(*handle_,
sg_graph_view,
sg_property_iter,
property_initial_value,
cugraph::reduce_op::plus<result_t>{});
break;
case reduction_type_t::MINIMUM:
expected_result = reduce_v(*handle_,
sg_graph_view,
sg_property_iter,
property_initial_value,
cugraph::reduce_op::minimum<result_t>{});
break;
case reduction_type_t::MAXIMUM:
expected_result = reduce_v(*handle_,
sg_graph_view,
sg_property_iter,
property_initial_value,
cugraph::reduce_op::maximum<result_t>{});
break;
default: FAIL() << "should not be reached.";
}
result_compare<result_t> compare{};
ASSERT_TRUE(compare(expected_result, results[reduction_type]));
}
}
}
}
private:
static std::unique_ptr<raft::handle_t> handle_;
};
template <typename input_usecase_t>
std::unique_ptr<raft::handle_t> Tests_MGReduceV<input_usecase_t>::handle_ = nullptr;
using Tests_MGReduceV_File = Tests_MGReduceV<cugraph::test::File_Usecase>;
using Tests_MGReduceV_Rmat = Tests_MGReduceV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MGReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int32_t, float>, false>(
std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int32_t, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int32_t, float>, true>(
std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, thrust::tuple<int32_t, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int32_t, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int32_t, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int32_t, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt64Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int32_t, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int32_t, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int32_t, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt32Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int32_t, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MGReduceV_Rmat, CheckInt64Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int32_t, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MGReduceV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(rmat_small_test,
Tests_MGReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false))));
INSTANTIATE_TEST_SUITE_P(
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with
--gtest_filter to select only the rmat_benchmark_test with a specific
vertex & edge type combination) by command line arguments and do not
include more than one Rmat_Usecase that differ only in scale or edge
factor (to avoid running same benchmarks more than once) */
Tests_MGReduceV_Rmat,
::testing::Combine(
::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(20, 32, 0.57, 0.19, 0.19, 0, false, false))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
|
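The test above exercises the multi-GPU reduce_v primitive against a single-GPU reference and accepts results within a relative tolerance. A minimal single-GPU sketch of that style of check, using plain thrust::reduce on a device vector, is shown below; the vector contents, initial value, and tolerance are illustrative and not taken from the test.

#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
  // Stand-in vertex property array and initial value, reduced with plus.
  thrust::device_vector<float> prop(1000, 1.0f);
  float init = 10.0f;
  float sum  = thrust::reduce(prop.begin(), prop.end(), init, thrust::plus<float>());

  // Same relative-tolerance rule as the scalar result_compare above.
  float expected          = 1010.0f;
  double threshold_ratio  = 1e-2;
  bool ok = (sum == expected) ||
            (std::abs(sum - expected) <
             std::max(std::abs(sum), std::abs(expected)) * threshold_ratio);
  std::printf("sum=%f expected=%f ok=%d\n", sum, expected, ok);
  return ok ? 0 : 1;
}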
bfda8a1124aacfe851aa9211fdd09fae0031354a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrix_transpose_k1(float* input,float* output,const int nx, const int ny)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.x*blockDim.x;
//printf("gid : %d , offset : %d , index : %d ,value : %f \n", gid, offset, offset + blockIdx.x,input[offset + blockIdx.x]);
output[gid] = input[offset + blockIdx.x];
} | bfda8a1124aacfe851aa9211fdd09fae0031354a.cu | #include "includes.h"
__global__ void matrix_transpose_k1(float* input,float* output,const int nx, const int ny)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.x*blockDim.x;
//printf("gid : %d , offset : %d , index : %d ,value : %f \n", gid, offset, offset + blockIdx.x,input[offset + blockIdx.x]);
output[gid] = input[offset + blockIdx.x];
} |
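The matrix_transpose_k1 kernel above indexes the input as threadIdx.x*blockDim.x + blockIdx.x and never reads its nx/ny arguments, so it only behaves as a transpose when the launch shape happens to match the matrix shape. A bounds-checked variant that does use the dimensions, assuming row-major storage, could look like the sketch below; the kernel name and launch shape are illustrative.

__global__ void matrix_transpose_2d(const float* input, float* output, const int nx, const int ny)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;  // column index into the input
    int row = blockIdx.y * blockDim.y + threadIdx.y;  // row index into the input
    if (col < nx && row < ny) {
        // element (row, col) of the ny x nx input becomes element (col, row) of the nx x ny output
        output[col * ny + row] = input[row * nx + col];
    }
}
// Possible launch:
//   dim3 block(16, 16);
//   dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//   matrix_transpose_2d<<<grid, block>>>(d_input, d_output, nx, ny);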
0951de8afe1a8bdc900754f7daeb6485e7bb0907.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//constants
#define n 0.0002
#define p 0.5
#define G 0.75
//parameters
#define SIZE 4
#define NUMBER_OF_ITERATIONS 3
#define ALGORITHM 4
//1 = sequential
//2 = simple parallel
//3 = 1 block N threads
//4 = 1 N blocks 1 thread
#define DEBUG 1
//if DEBUG, there will be printf msgs
__device__ int idx(int i, int j){
return (SIZE * i + j);
}
int idx_seq(int i, int j){
return (SIZE * i + j);
}
__device__ void update(double * u, double * u1, double *u2, int i, int j){
//taken care of by other threads
if(i == 0 || j == 0 || i == SIZE-1 || j == SIZE-1){
return;
}
//middle elements
u[idx(i, j)]= p *
(u1[idx(i-1,j)] + u1[idx(i+1,j)]
+u1[idx(i,j-1)] + u1[idx(i,j+1)]
- 4 * u1[idx(i, j)])
+ 2 * u1[idx(i, j)] - (1-n) * u2[idx(i, j)];
//sides & corners merged
if(j==1){
u[idx(i,0)] = G * u[idx(i, j)];
//top left corner
if(i == 1){
u[idx(0,0)] = G * u[idx(1,0)];
}
//top right corner
if(i == SIZE-2){
u[idx(SIZE-1,0)] = G * u[idx(SIZE-2, 0)];
}
}
if(i==1){
u[idx(0, j)] = G * u[idx(i, j)];
//bottom left corner
if(j==SIZE-2){
u[idx(0,SIZE-1)] = G * u[idx(0, SIZE-2)];
}
}
if(j == SIZE-2){
u[idx(i, SIZE-1)] = G * u[idx(i, j)];
}
if(i == SIZE-2){
u[idx(SIZE-1, j)] = G * u[idx(i, j)];
//bottom right corner
if(j== SIZE-2){
u[idx(SIZE-1, SIZE-1)] = G * u[idx(SIZE-1, SIZE-2)];
}
}
}
__global__ void updateElementThree(double *u, double *u1, double *u2)
{
int j = threadIdx.x;
for(int i=0; i < SIZE; i++)
{
update(u, u1, u2, i, j);
}
}
__global__ void updateElementFour(double *u, double *u1, double *u2)
{
int i = blockIdx.x;
for(int j=0; j < SIZE; j++)
{
update(u, u1, u2, i, j);
}
}
__global__ void updateElementTwo(double *u, double *u1, double *u2)
{
int i = blockIdx.x;
int j = threadIdx.x;
update(u, u1, u2, i, j);
}
void printMatrix(double* u){
printf("\n");
for(int i = 0; i < SIZE * SIZE; i++){
printf("%.3lf", u[i]);
printf("\t");
if((i+1) % SIZE == 0 && i > 0){
printf("\n");
}
}
}
int main(){
//Code is in C, but the nvcc compiler expects C++
	//C likes it when you implicitly cast a void* to a double*
	//but C++ really doesn't like it when you implicitly cast a void* to a double*
double* u = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
double* u1 = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
double* u2 = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
//initialize to 0
for(int i = 0; i < SIZE * SIZE; i++){
u1[i] = 0;
u2[i] = 0;
}
//hit that drummmm
u1[(SIZE * SIZE/2 + SIZE/2)] = 1;
clock_t start, end;
	double cpu_time_used;
	start = clock();  // also time the sequential branch; the parallel branch resets start after its device setup
if(ALGORITHM == 1)
{
for(int a=0; a < NUMBER_OF_ITERATIONS; a++)
{
for(int i = 1; i <SIZE-1; i++){
for(int j = 1; j <SIZE-1 ; j++){
//middle elements
u[idx_seq(i, j)]= p *
(u1[idx_seq(i-1,j)] + u1[idx_seq(i+1,j)]
+u1[idx_seq(i,j-1)] + u1[idx_seq(i,j+1)]
- 4 * u1[idx_seq(i, j)])
+ 2 * u1[idx_seq(i, j)] - (1-n) * u2[idx_seq(i, j)];
}
}
//sides
for(int i = 1; i < SIZE-1; i++){
u[idx_seq(0, i)] = G * u[idx_seq(1, i)];
u[idx_seq(SIZE-1, i)] = G * u[idx_seq(SIZE-2, i)];
u[idx_seq(i,0)] = G * u[idx_seq(i, 1)];
u[idx_seq(i, SIZE-1)] = G * u[idx_seq(i, SIZE-2)];
}
//corners
u[idx_seq(0,0)] = G * u[idx_seq(1,0)];
u[idx_seq(SIZE-1,0)] = G * u[idx_seq(SIZE-2, 0)];
u[idx_seq(0,SIZE-1)] = G * u[idx_seq(0, SIZE-2)];
u[idx_seq(SIZE-1, SIZE-1)] = G * u[idx_seq(SIZE-1, SIZE-2)];
//update after iterations
for(int i=0; i < SIZE * SIZE; i++){
u2[i] = u1[i];
u1[i] = u[i];
}
if(DEBUG){
if(SIZE > 4){
printf("\n%lf", u[(SIZE * SIZE/2 + SIZE/2)] );
} else {
printMatrix(u);
}
}
}
}
else
{
double* u_dev, *u2_dev;
double *u1_dev;
hipMalloc((void **)&u_dev, SIZE*SIZE *sizeof(double));
hipMalloc((void **)&u1_dev, SIZE*SIZE *sizeof(double));
hipMalloc((void **)&u2_dev, SIZE*SIZE *sizeof(double));
hipMemcpy(u_dev, u, SIZE*SIZE *sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(u1_dev, u1, SIZE*SIZE *sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(u2_dev, u2, SIZE*SIZE *sizeof(double), hipMemcpyHostToDevice);
//hit that dRuMm
u1[(SIZE * SIZE/2 + SIZE/2)] = 1;
start = clock();
for(int i = 0; i < NUMBER_OF_ITERATIONS ; i++){
if(ALGORITHM ==2){
updateElementTwo << <SIZE, SIZE >> > (u_dev, u1_dev, u2_dev);
}
if(ALGORITHM ==3 ){
updateElementThree << <1, SIZE >> > (u_dev, u1_dev, u2_dev);
}
if(ALGORITHM == 4){
updateElementFour << <SIZE, 1 >> > (u_dev, u1_dev, u2_dev);
}
hipDeviceSynchronize();
if(DEBUG){
hipMemcpy(u, u_dev, SIZE*SIZE *sizeof(double), hipMemcpyDeviceToHost);
if(SIZE > 4){
printf("\n%lf", u[(SIZE * SIZE/2 + SIZE/2)] );
} else {
printMatrix(u);
}
}
			//ideally, replace these two statements with a
//__global__ function that updates the matrices in parallel
hipMemcpy(u2_dev, u1_dev, SIZE*SIZE *sizeof(double), hipMemcpyDeviceToDevice);
hipMemcpy(u1_dev, u_dev, SIZE*SIZE *sizeof(double), hipMemcpyDeviceToDevice);
}
hipFree(u_dev);
hipFree(u1_dev);
hipFree(u2_dev);
}
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("\n\nAlgorithm: ");
switch (ALGORITHM){
case 1:
printf("Sequential\n");
break;
case 2:
printf("Simple parallel\n");
break;
case 3:
printf("1 block with N threads\n");
break;
case 4:
printf("N blocks with 1 thread\n");
break;
default: break;
}
printf("\nExecution time:\t%lf \n\n", cpu_time_used);
free(u);
free(u1);
free(u2);
} | 0951de8afe1a8bdc900754f7daeb6485e7bb0907.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//constants
#define n 0.0002
#define p 0.5
#define G 0.75
//parameters
#define SIZE 4
#define NUMBER_OF_ITERATIONS 3
#define ALGORITHM 4
//1 = sequential
//2 = simple parallel
//3 = 1 block N threads
//4 = 1 N blocks 1 thread
#define DEBUG 1
//if DEBUG, there will be printf msgs
__device__ int idx(int i, int j){
return (SIZE * i + j);
}
int idx_seq(int i, int j){
return (SIZE * i + j);
}
__device__ void update(double * u, double * u1, double *u2, int i, int j){
//taken care of by other threads
if(i == 0 || j == 0 || i == SIZE-1 || j == SIZE-1){
return;
}
//middle elements
u[idx(i, j)]= p *
(u1[idx(i-1,j)] + u1[idx(i+1,j)]
+u1[idx(i,j-1)] + u1[idx(i,j+1)]
- 4 * u1[idx(i, j)])
+ 2 * u1[idx(i, j)] - (1-n) * u2[idx(i, j)];
//sides & corners merged
if(j==1){
u[idx(i,0)] = G * u[idx(i, j)];
//top left corner
if(i == 1){
u[idx(0,0)] = G * u[idx(1,0)];
}
//top right corner
if(i == SIZE-2){
u[idx(SIZE-1,0)] = G * u[idx(SIZE-2, 0)];
}
}
if(i==1){
u[idx(0, j)] = G * u[idx(i, j)];
//bottom left corner
if(j==SIZE-2){
u[idx(0,SIZE-1)] = G * u[idx(0, SIZE-2)];
}
}
if(j == SIZE-2){
u[idx(i, SIZE-1)] = G * u[idx(i, j)];
}
if(i == SIZE-2){
u[idx(SIZE-1, j)] = G * u[idx(i, j)];
//bottom right corner
if(j== SIZE-2){
u[idx(SIZE-1, SIZE-1)] = G * u[idx(SIZE-1, SIZE-2)];
}
}
}
__global__ void updateElementThree(double *u, double *u1, double *u2)
{
int j = threadIdx.x;
for(int i=0; i < SIZE; i++)
{
update(u, u1, u2, i, j);
}
}
__global__ void updateElementFour(double *u, double *u1, double *u2)
{
int i = blockIdx.x;
for(int j=0; j < SIZE; j++)
{
update(u, u1, u2, i, j);
}
}
__global__ void updateElementTwo(double *u, double *u1, double *u2)
{
int i = blockIdx.x;
int j = threadIdx.x;
update(u, u1, u2, i, j);
}
void printMatrix(double* u){
printf("\n");
for(int i = 0; i < SIZE * SIZE; i++){
printf("%.3lf", u[i]);
printf("\t");
if((i+1) % SIZE == 0 && i > 0){
printf("\n");
}
}
}
int main(){
//Code is in C, but the nvcc compiler expects C++
	//C likes it when you implicitly cast a void* to a double*
	//but C++ really doesn't like it when you implicitly cast a void* to a double*
double* u = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
double* u1 = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
double* u2 = static_cast<double*>(malloc(sizeof(double) * SIZE * SIZE ));
//initialize to 0
for(int i = 0; i < SIZE * SIZE; i++){
u1[i] = 0;
u2[i] = 0;
}
//hit that drummmm
u1[(SIZE * SIZE/2 + SIZE/2)] = 1;
clock_t start, end;
	double cpu_time_used;
	start = clock();  // also time the sequential branch; the parallel branch resets start after its device setup
if(ALGORITHM == 1)
{
for(int a=0; a < NUMBER_OF_ITERATIONS; a++)
{
for(int i = 1; i <SIZE-1; i++){
for(int j = 1; j <SIZE-1 ; j++){
//middle elements
u[idx_seq(i, j)]= p *
(u1[idx_seq(i-1,j)] + u1[idx_seq(i+1,j)]
+u1[idx_seq(i,j-1)] + u1[idx_seq(i,j+1)]
- 4 * u1[idx_seq(i, j)])
+ 2 * u1[idx_seq(i, j)] - (1-n) * u2[idx_seq(i, j)];
}
}
//sides
for(int i = 1; i < SIZE-1; i++){
u[idx_seq(0, i)] = G * u[idx_seq(1, i)];
u[idx_seq(SIZE-1, i)] = G * u[idx_seq(SIZE-2, i)];
u[idx_seq(i,0)] = G * u[idx_seq(i, 1)];
u[idx_seq(i, SIZE-1)] = G * u[idx_seq(i, SIZE-2)];
}
//corners
u[idx_seq(0,0)] = G * u[idx_seq(1,0)];
u[idx_seq(SIZE-1,0)] = G * u[idx_seq(SIZE-2, 0)];
u[idx_seq(0,SIZE-1)] = G * u[idx_seq(0, SIZE-2)];
u[idx_seq(SIZE-1, SIZE-1)] = G * u[idx_seq(SIZE-1, SIZE-2)];
//update after iterations
for(int i=0; i < SIZE * SIZE; i++){
u2[i] = u1[i];
u1[i] = u[i];
}
if(DEBUG){
if(SIZE > 4){
printf("\n%lf", u[(SIZE * SIZE/2 + SIZE/2)] );
} else {
printMatrix(u);
}
}
}
}
else
{
double* u_dev, *u2_dev;
double *u1_dev;
cudaMalloc((void **)&u_dev, SIZE*SIZE *sizeof(double));
cudaMalloc((void **)&u1_dev, SIZE*SIZE *sizeof(double));
cudaMalloc((void **)&u2_dev, SIZE*SIZE *sizeof(double));
cudaMemcpy(u_dev, u, SIZE*SIZE *sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(u1_dev, u1, SIZE*SIZE *sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(u2_dev, u2, SIZE*SIZE *sizeof(double), cudaMemcpyHostToDevice);
//hit that dRuMm
u1[(SIZE * SIZE/2 + SIZE/2)] = 1;
start = clock();
for(int i = 0; i < NUMBER_OF_ITERATIONS ; i++){
if(ALGORITHM ==2){
updateElementTwo << <SIZE, SIZE >> > (u_dev, u1_dev, u2_dev);
}
if(ALGORITHM ==3 ){
updateElementThree << <1, SIZE >> > (u_dev, u1_dev, u2_dev);
}
if(ALGORITHM == 4){
updateElementFour << <SIZE, 1 >> > (u_dev, u1_dev, u2_dev);
}
cudaDeviceSynchronize();
if(DEBUG){
cudaMemcpy(u, u_dev, SIZE*SIZE *sizeof(double), cudaMemcpyDeviceToHost);
if(SIZE > 4){
printf("\n%lf", u[(SIZE * SIZE/2 + SIZE/2)] );
} else {
printMatrix(u);
}
}
			//ideally, replace these two statements with a
//__global__ function that updates the matrices in parallel
cudaMemcpy(u2_dev, u1_dev, SIZE*SIZE *sizeof(double), cudaMemcpyDeviceToDevice);
cudaMemcpy(u1_dev, u_dev, SIZE*SIZE *sizeof(double), cudaMemcpyDeviceToDevice);
}
cudaFree(u_dev);
cudaFree(u1_dev);
cudaFree(u2_dev);
}
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("\n\nAlgorithm: ");
switch (ALGORITHM){
case 1:
printf("Sequential\n");
break;
case 2:
printf("Simple parallel\n");
break;
case 3:
printf("1 block with N threads\n");
break;
case 4:
printf("N blocks with 1 thread\n");
break;
default: break;
}
printf("\nExecution time:\t%lf \n\n", cpu_time_used);
free(u);
free(u1);
free(u2);
} |
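The file pair above notes that the two device-to-device copies which rotate the time steps could ideally be replaced by a kernel. A minimal sketch of such a kernel follows; the name, block size, and the choice to rotate both arrays in one launch are illustrative.

__global__ void rotate_steps(const double* u, double* u1, double* u2, int count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count) {
        u2[i] = u1[i];  // previous step becomes the step before last
        u1[i] = u[i];   // current step becomes the previous step
    }
}
// Possible call in place of the two device-to-device copies:
//   rotate_steps<<<(SIZE * SIZE + 255) / 256, 256>>>(u_dev, u1_dev, u2_dev, SIZE * SIZE);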
c6fd699bff316515d32eb4e3839d453273f8fc1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "utils.h"
#define SIZE 512
__global__ void add(int *a, int *b, int *c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
int main(int argc, char* argv[]) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int*) malloc(SIZE * sizeof(int));
fill_array(a, SIZE);
display_array("a", a);
b = (int*) malloc(SIZE * sizeof(int));
fill_array(b, SIZE);
display_array("b", b);
c = (int*) malloc(SIZE * sizeof(int));
hipMalloc((void**) &d_a, SIZE * sizeof(int));
hipMalloc((void**) &d_b, SIZE * sizeof(int));
hipMalloc((void**) &d_c, SIZE * sizeof(int));
hipMemcpy(d_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(SIZE), dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(c, d_c, SIZE * sizeof(int), hipMemcpyDeviceToHost);
display_array("c", c);
hipFree(d_c);
hipFree(d_b);
hipFree(d_a);
free(c);
free(b);
free(a);
return 0;
}
| c6fd699bff316515d32eb4e3839d453273f8fc1a.cu | #include <stdio.h>
#include <stdlib.h>
#include "utils.h"
#define SIZE 512
__global__ void add(int *a, int *b, int *c) {
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
int main(int argc, char* argv[]) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int*) malloc(SIZE * sizeof(int));
fill_array(a, SIZE);
display_array("a", a);
b = (int*) malloc(SIZE * sizeof(int));
fill_array(b, SIZE);
display_array("b", b);
c = (int*) malloc(SIZE * sizeof(int));
cudaMalloc((void**) &d_a, SIZE * sizeof(int));
cudaMalloc((void**) &d_b, SIZE * sizeof(int));
cudaMalloc((void**) &d_c, SIZE * sizeof(int));
cudaMemcpy(d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
add<<<SIZE, 1>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
display_array("c", c);
cudaFree(d_c);
cudaFree(d_b);
cudaFree(d_a);
free(c);
free(b);
free(a);
return 0;
}
|
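The add example above launches SIZE one-thread blocks and indexes by blockIdx.x. A sketch of the more common layout, many threads per block with a global index and a bounds check so the element count need not divide the block size, is below; the kernel name and block size are illustrative.

__global__ void add_n(const int* a, const int* b, int* c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // global element index
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
// Possible launch for n = SIZE elements:
//   int threads = 256;
//   int blocks  = (SIZE + threads - 1) / threads;
//   add_n<<<blocks, threads>>>(d_a, d_b, d_c, SIZE);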
86a1b763c1260e3316b5208c67a4ee4f5c5135b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include <sys/time.h>
// Device code
__global__ void kernel(int* d_A, int pitch,int height,int width)
{
for (int c = 0; c < height; ++c) {
for (int r = 0; r < width; ++r) {
int* row = (int*)((char*)d_A + r * pitch);
row[c] = row[c]*row[c];
}
}
}
//Host Code
int main()
{
int* d_A;
size_t pitch;
int *A;
int height,width;
for(int v2=100;v2>=32;v2-=4){
height = width = v2;
int rows = height;
int cols = width;
A = (int *)malloc(rows*cols*sizeof(int));
for (int i = 0; i < rows*cols; i++) A[i] = i;
hipMallocPitch((void**)&d_A, &pitch, width * sizeof(int), height);
hipMemcpy2D(d_A, pitch, A, sizeof(int)*cols, sizeof(int)*cols, rows, hipMemcpyHostToDevice);
for(int v1=32;v1>=1;v1--){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for(int j=0;j<1000000;j++)
hipLaunchKernelGGL(( kernel), dim3(100), dim3(32), 0, 0, d_A, pitch,height,v1);
gettimeofday(&tv2, NULL);
printf ("%d %d %f\n",v1,v2,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
hipDeviceSynchronize();
}}
// for(int i=0;i<rows*cols;i++)
// printf("%d %d\n",A[i],d_A[i]);
return 0;
}
| 86a1b763c1260e3316b5208c67a4ee4f5c5135b9.cu | #include<stdio.h>
#include<cuda.h>
#include <sys/time.h>
// Device code
__global__ void kernel(int* d_A, int pitch,int height,int width)
{
for (int c = 0; c < height; ++c) {
for (int r = 0; r < width; ++r) {
int* row = (int*)((char*)d_A + r * pitch);
row[c] = row[c]*row[c];
}
}
}
//Host Code
int main()
{
int* d_A;
size_t pitch;
int *A;
int height,width;
for(int v2=100;v2>=32;v2-=4){
height = width = v2;
int rows = height;
int cols = width;
A = (int *)malloc(rows*cols*sizeof(int));
for (int i = 0; i < rows*cols; i++) A[i] = i;
cudaMallocPitch((void**)&d_A, &pitch, width * sizeof(int), height);
cudaMemcpy2D(d_A, pitch, A, sizeof(int)*cols, sizeof(int)*cols, rows, cudaMemcpyHostToDevice);
for(int v1=32;v1>=1;v1--){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for(int j=0;j<1000000;j++)
kernel<<<100, 32>>>(d_A, pitch,height,v1);
gettimeofday(&tv2, NULL);
printf ("%d %d %f\n",v1,v2,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
cudaDeviceSynchronize();
}}
// for(int i=0;i<rows*cols;i++)
// printf("%d %d\n",A[i],d_A[i]);
return 0;
}
|
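In the benchmark above every launched thread walks the whole pitched allocation serially, so elements get squared many times over. The usual pattern is one thread per element, locating each row through the pitch returned by cudaMallocPitch/hipMallocPitch. A sketch follows; the kernel name and launch shape are illustrative, and a height x width row-major layout is assumed.

__global__ void square_pitched(int* d_A, size_t pitch, int width, int height)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < width && row < height) {
        // each row starts pitch bytes after the previous one; elements within a row are contiguous
        int* row_ptr = (int*)((char*)d_A + row * pitch);
        row_ptr[col] = row_ptr[col] * row_ptr[col];
    }
}
// Possible launch:
//   dim3 block(32, 8);
//   dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
//   square_pitched<<<grid, block>>>(d_A, pitch, width, height);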
9f2a4050acb759f2c9a0e964c33a231ea3c40413.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inq_conv_layer.hpp"
#include <cmath>
namespace caffe {
template <typename Dtype>
__global__ void TPCalc(const int n, Dtype *param, Dtype *mask,
const Dtype threshold_, const int max_quantum_exp_,
const int min_quantum_exp_) {
CUDA_KERNEL_LOOP(i, n) {
if (mask[i] == 1) {
if (param[i] >= threshold_) {
// exp_ won't be larger than max_quantum_exp_, already checked in the
int exp_ = floor(log(4.0 * param[i] / 3.0) / log(2.0));
// CHECK_LE(exp_, max_quantum_exp_) ;
if (exp_ >= min_quantum_exp_) {
param[i] = pow(2.0, exp_);
} else {
param[i] = 0;
}
mask[i] = 0;
} else if (param[i] <= -threshold_) {
int exp_ = floor(log(4.0 * (-param[i]) / 3.0) / log(2.0));
if (exp_ >= min_quantum_exp_) {
param[i] = -pow(2.0, exp_);
} else {
param[i] = 0;
}
mask[i] = 0;
}
}
}
}
template <typename Dtype>
__global__ void CCMaskApply(const int n, const Dtype *wb, const Dtype *mask,
Dtype *wb_t) {
CUDA_KERNEL_LOOP(index, n) { wb_t[index] = wb[index] * mask[index]; }
}
template <typename Dtype>
void INQConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) {
/* for two-power network */
if (this->phase_ == TRAIN) {
if (this->iter_ == 0 && !this->quantized_) {
// Make the corresponding weights & bias into two power form.
if (this->blobs_.size() == 4 && (this->bias_term_)) {
LOG_IF(INFO, Caffe::root_solver()) << this->name() << " ("
<< this->type() << "): "<< " Shaping the weights...";
ComputeQuantumRange(this->blobs_[0].get(), this->blobs_[2].get(),
this->portions_, weight_quantum_values_,
num_weight_quantum_values_, max_weight_quantum_exp_,
min_weight_quantum_exp_);
ShapeIntoTwoPower_cpu(this->blobs_[0].get(), this->blobs_[2].get(),
this->portions_, max_weight_quantum_exp_,
min_weight_quantum_exp_);
LOG_IF(INFO, Caffe::root_solver()) << this->name() << " ("
<< this->type() << "): "<< " Shaping the bias...";
ComputeQuantumRange(this->blobs_[1].get(), this->blobs_[3].get(),
this->portions_, bias_quantum_values_,
num_bias_quantum_values_, max_bias_quantum_exp_,
min_bias_quantum_exp_);
ShapeIntoTwoPower_cpu(this->blobs_[1].get(), this->blobs_[3].get(),
this->portions_, max_bias_quantum_exp_,
min_bias_quantum_exp_);
} else if (this->blobs_.size() == 2 && (!this->bias_term_)) {
LOG_IF(INFO, Caffe::root_solver()) << "ERROR: No bias terms found... but continue...";
LOG_IF(INFO, Caffe::root_solver()) << this->name() << " ("
<< this->type() << "): "<< " Shaping the bias...";
ComputeQuantumRange(this->blobs_[0].get(), this->blobs_[1].get(),
this->portions_, weight_quantum_values_,
num_weight_quantum_values_, max_weight_quantum_exp_,
min_weight_quantum_exp_);
ShapeIntoTwoPower_cpu(this->blobs_[0].get(), this->blobs_[1].get(),
this->portions_, max_weight_quantum_exp_,
min_weight_quantum_exp_);
}
this->quantized_ = true;
}
}
const Dtype *weight = this->blobs_[0]->mutable_gpu_data();
const Dtype *bias = NULL;
if (this->bias_term_) {
bias = this->blobs_[1]->mutable_gpu_data();
}
// Forward calculation with (masked) weight and bias
for (int i = 0; i < bottom.size(); ++i) {
const Dtype *bottom_data = bottom[i]->gpu_data();
Dtype *top_data = top[i]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weight,
top_data + top[i]->offset(n));
if (this->bias_term_) {
this->forward_gpu_bias(top_data + top[i]->offset(n), bias);
}
}
}
}
template <typename Dtype>
void INQConvolutionLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
// LOG_IF(INFO, Caffe::root_solver()) << "Starting Backward in tp_conv... [gpu]" ;
const Dtype *weight = this->blobs_[0]->mutable_gpu_data();
const Dtype *weightMask = this->blobs_[2]->gpu_data();
Dtype *weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype *top_diff = top[i]->gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
const Dtype *biasMask = this->blobs_[3]->gpu_data();
Dtype *bias_diff = this->blobs_[1]->mutable_gpu_diff();
hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[3]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n));
}
// LOG_IF(INFO, Caffe::root_solver()) << "bias_diff Backwarded in tp_conv... [gpu]";
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
const Dtype *bottom_data = bottom[i]->gpu_data();
Dtype *bottom_diff = bottom[i]->mutable_gpu_diff();
hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[2]->count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
this->blobs_[2]->count(), weight_diff, weightMask, weight_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n),
top_diff + top[i]->offset(n), weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
this->backward_gpu_gemm(top_diff + top[i]->offset(n), weight,
bottom_diff + bottom[i]->offset(n));
}
}
}
}
// LOG_IF(INFO, Caffe::root_solver()) << "Backward finished in tp_conv... [gpu]";
}
/*
template <typename Dtype>
void INQConvolutionLayer<Dtype>::ComputeQuantumRange(
const Blob<Dtype> *blob, const Blob<Dtype> *blob_mask,
const vector<float> portions, vector<Dtype> &quantum_values,
const int &num_quantum_values, int &max_quantum_exp_,
int &min_quantum_exp_) {
quantum_values.resize(2 * num_quantum_values + 1);
const Dtype *values = blob->cpu_data();
const Dtype *mask = blob_mask->cpu_data();
Dtype max_value_tobe_quantized = INT_MIN;
Dtype max_value_quantized = INT_MIN;
int updated = 0;
for (unsigned int k = 0; k < blob->count(); ++k) {
if (mask[k] == 1) {
if (fabs(values[k]) > max_value_tobe_quantized) {
max_value_tobe_quantized = fabs(values[k]);
}
} else if (mask[k] == 0) {
if (fabs(values[k]) > max_value_quantized) {
max_value_quantized = fabs(values[k]);
}
++updated;
} else {
LOG(ERROR) << "Mask value is not 0, nor 1, in tp_inner_product_layer";
}
}
if (max_value_quantized != INT_MIN) {
// normal situation
CHECK_GT(updated, 0) << "max_value_quantized is not 0.0, but updated is "
"0!";
max_quantum_exp_ = round(log(max_value_quantized) / log(2.0));
int max_tobe_quantized_exp_ =
floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
CHECK_GE(max_quantum_exp_, max_tobe_quantized_exp_);
} else {
if (updated == 0) {
// normal situation (nothing quantized yet)
LOG_IF(INFO, portions_[0] != 0) << "Warning: nothing quantized yet, "
"portion should probably start with "
"0%%!";
max_quantum_exp_ =
floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
} else { // DNS model (max_value_quantized ==0 && update != 0)
max_quantum_exp_ =
floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
}
}
//
// if (portions[0] == 0) {
// CHECK_EQ(updated, 0) << updated
// << " updated values while there should be none!";
// max_quantum_exp_ =
// floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
// }
// else {
// max_quantum_exp_ = round(log(max_value_quantized) / log(2.0));
// int max_tobe_quantized_exp_ =
// floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
// CHECK_LE(max_tobe_quantized_exp_, max_quantum_exp_)
// << "New quantum exp is greater than the one already got!";
// }
//
min_quantum_exp_ = max_quantum_exp_ - num_quantum_values + 1;
std::cout << "Max_power = " << max_quantum_exp_ << std::endl;
std::cout << "Min_power = " << min_quantum_exp_ << std::endl;
for (unsigned int k = 0; k < num_quantum_values; ++k) {
quantum_values[k] = pow(2.0, max_quantum_exp_ - k);
quantum_values[2 * num_quantum_values - k] = -quantum_values[k];
}
quantum_values[num_quantum_values] = 0;
}
*/
/*
template <typename Dtype>
void INQConvolutionLayer<Dtype>::ShapeIntoTwoPower_gpu(
Blob<Dtype> *input_blob, Blob<Dtype> *mask_blob,
const vector<float> &portions, const int &max_quantum_exp_,
const int &min_quantum_exp_) {
const float previous_portion = portions[0];
const float current_portion = portions[1];
if (current_portion == 0) {
LOG_IF(INFO, Caffe::root_solver()) << "Current portion equals 0.0%, skipping ...";
return;
}
if ( max_quantum_exp_ == -100) {
LOG_IF(INFO, Caffe::root_solver()) << "All parameters already pruned away, skipping ...";
return;
}
// parameter statistics
const Dtype *param_cpu = input_blob->cpu_data();
Dtype *param = input_blob->mutable_gpu_data();
Dtype *mask = mask_blob->mutable_gpu_data();
int count = input_blob->count();
int num_not_yet_quantized = 0;
vector<Dtype> sorted_param;
for (int i = 0; i < count; ++i) {
if (mask[i] == 1) {
++num_not_yet_quantized;
sorted_param.push_back(fabs(param_cpu[i]));
}
}
// just an estimation
int num_init_not_quantized =
round(Dtype(num_not_yet_quantized) / (1.0 - previous_portion));
int num_not_tobe_quantized =
round(num_init_not_quantized * (1.0 - current_portion));
int num_tobe_update = num_not_yet_quantized - num_not_tobe_quantized;
LOG_IF(INFO, Caffe::root_solver()) << "portions: " << previous_portion * 100 <<"% -> "
<< current_portion * 100 << "% ("
<< "total: "
<< Dtype(count-num_not_yet_quantized)/count*100 << "% -> "
<< Dtype(count-num_not_tobe_quantized)/count*100<< "%"
<< ")";
LOG_IF(INFO, Caffe::root_solver()) << "init_not_quantized/total: "
<< num_init_not_quantized << "/"
<< count;
LOG_IF(INFO, Caffe::root_solver()) << "to_update/not_tobe_quantized/not_yet_quantized: "
<< num_tobe_update << "/"
<< num_not_tobe_quantized << "/"
<< num_not_yet_quantized ;
if (num_tobe_update > 0) {
sort(sorted_param.begin(), sorted_param.end());
Dtype threshold_ = sorted_param[num_not_tobe_quantized];
TPCalc<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, param, mask, threshold_, max_quantum_exp_, min_quantum_exp_);
CUDA_POST_KERNEL_CHECK;
// LOG_IF(INFO, Caffe::root_solver()) << "Shaping finished in INQ_conv... [gpu]";
}
}
*/
INSTANTIATE_LAYER_GPU_FUNCS(INQConvolutionLayer);
} // namespace caffe
| 9f2a4050acb759f2c9a0e964c33a231ea3c40413.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inq_conv_layer.hpp"
#include <cmath>
namespace caffe {
template <typename Dtype>
__global__ void TPCalc(const int n, Dtype *param, Dtype *mask,
const Dtype threshold_, const int max_quantum_exp_,
const int min_quantum_exp_) {
CUDA_KERNEL_LOOP(i, n) {
if (mask[i] == 1) {
if (param[i] >= threshold_) {
// exp_ won't be larger than max_quantum_exp_, already checked in the
int exp_ = floor(log(4.0 * param[i] / 3.0) / log(2.0));
// CHECK_LE(exp_, max_quantum_exp_) ;
if (exp_ >= min_quantum_exp_) {
param[i] = pow(2.0, exp_);
} else {
param[i] = 0;
}
mask[i] = 0;
} else if (param[i] <= -threshold_) {
int exp_ = floor(log(4.0 * (-param[i]) / 3.0) / log(2.0));
if (exp_ >= min_quantum_exp_) {
param[i] = -pow(2.0, exp_);
} else {
param[i] = 0;
}
mask[i] = 0;
}
}
}
}
template <typename Dtype>
__global__ void CCMaskApply(const int n, const Dtype *wb, const Dtype *mask,
Dtype *wb_t) {
CUDA_KERNEL_LOOP(index, n) { wb_t[index] = wb[index] * mask[index]; }
}
template <typename Dtype>
void INQConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) {
/* for two-power network */
if (this->phase_ == TRAIN) {
if (this->iter_ == 0 && !this->quantized_) {
// Make the corresponding weights & bias into two power form.
if (this->blobs_.size() == 4 && (this->bias_term_)) {
LOG_IF(INFO, Caffe::root_solver()) << this->name() << " ("
<< this->type() << "): "<< " Shaping the weights...";
ComputeQuantumRange(this->blobs_[0].get(), this->blobs_[2].get(),
this->portions_, weight_quantum_values_,
num_weight_quantum_values_, max_weight_quantum_exp_,
min_weight_quantum_exp_);
ShapeIntoTwoPower_cpu(this->blobs_[0].get(), this->blobs_[2].get(),
this->portions_, max_weight_quantum_exp_,
min_weight_quantum_exp_);
LOG_IF(INFO, Caffe::root_solver()) << this->name() << " ("
<< this->type() << "): "<< " Shaping the bias...";
ComputeQuantumRange(this->blobs_[1].get(), this->blobs_[3].get(),
this->portions_, bias_quantum_values_,
num_bias_quantum_values_, max_bias_quantum_exp_,
min_bias_quantum_exp_);
ShapeIntoTwoPower_cpu(this->blobs_[1].get(), this->blobs_[3].get(),
this->portions_, max_bias_quantum_exp_,
min_bias_quantum_exp_);
} else if (this->blobs_.size() == 2 && (!this->bias_term_)) {
LOG_IF(INFO, Caffe::root_solver()) << "ERROR: No bias terms found... but continue...";
LOG_IF(INFO, Caffe::root_solver()) << this->name() << " ("
<< this->type() << "): "<< " Shaping the bias...";
ComputeQuantumRange(this->blobs_[0].get(), this->blobs_[1].get(),
this->portions_, weight_quantum_values_,
num_weight_quantum_values_, max_weight_quantum_exp_,
min_weight_quantum_exp_);
ShapeIntoTwoPower_cpu(this->blobs_[0].get(), this->blobs_[1].get(),
this->portions_, max_weight_quantum_exp_,
min_weight_quantum_exp_);
}
this->quantized_ = true;
}
}
const Dtype *weight = this->blobs_[0]->mutable_gpu_data();
const Dtype *bias = NULL;
if (this->bias_term_) {
bias = this->blobs_[1]->mutable_gpu_data();
}
// Forward calculation with (masked) weight and bias
for (int i = 0; i < bottom.size(); ++i) {
const Dtype *bottom_data = bottom[i]->gpu_data();
Dtype *top_data = top[i]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weight,
top_data + top[i]->offset(n));
if (this->bias_term_) {
this->forward_gpu_bias(top_data + top[i]->offset(n), bias);
}
}
}
}
template <typename Dtype>
void INQConvolutionLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
// LOG_IF(INFO, Caffe::root_solver()) << "Starting Backward in tp_conv... [gpu]" ;
const Dtype *weight = this->blobs_[0]->mutable_gpu_data();
const Dtype *weightMask = this->blobs_[2]->gpu_data();
Dtype *weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype *top_diff = top[i]->gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
const Dtype *biasMask = this->blobs_[3]->gpu_data();
Dtype *bias_diff = this->blobs_[1]->mutable_gpu_diff();
CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[3]->count()),
CAFFE_CUDA_NUM_THREADS>>>(
this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n));
}
// LOG_IF(INFO, Caffe::root_solver()) << "bias_diff Backwarded in tp_conv... [gpu]";
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
const Dtype *bottom_data = bottom[i]->gpu_data();
Dtype *bottom_diff = bottom[i]->mutable_gpu_diff();
CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[2]->count()),
CAFFE_CUDA_NUM_THREADS>>>(
this->blobs_[2]->count(), weight_diff, weightMask, weight_diff);
CUDA_POST_KERNEL_CHECK;
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n),
top_diff + top[i]->offset(n), weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
this->backward_gpu_gemm(top_diff + top[i]->offset(n), weight,
bottom_diff + bottom[i]->offset(n));
}
}
}
}
// LOG_IF(INFO, Caffe::root_solver()) << "Backward finished in tp_conv... [gpu]";
}
/*
template <typename Dtype>
void INQConvolutionLayer<Dtype>::ComputeQuantumRange(
const Blob<Dtype> *blob, const Blob<Dtype> *blob_mask,
const vector<float> portions, vector<Dtype> &quantum_values,
const int &num_quantum_values, int &max_quantum_exp_,
int &min_quantum_exp_) {
quantum_values.resize(2 * num_quantum_values + 1);
const Dtype *values = blob->cpu_data();
const Dtype *mask = blob_mask->cpu_data();
Dtype max_value_tobe_quantized = INT_MIN;
Dtype max_value_quantized = INT_MIN;
int updated = 0;
for (unsigned int k = 0; k < blob->count(); ++k) {
if (mask[k] == 1) {
if (fabs(values[k]) > max_value_tobe_quantized) {
max_value_tobe_quantized = fabs(values[k]);
}
} else if (mask[k] == 0) {
if (fabs(values[k]) > max_value_quantized) {
max_value_quantized = fabs(values[k]);
}
++updated;
} else {
LOG(ERROR) << "Mask value is not 0, nor 1, in tp_inner_product_layer";
}
}
if (max_value_quantized != INT_MIN) {
// normal situation
CHECK_GT(updated, 0) << "max_value_quantized is not 0.0, but updated is "
"0!";
max_quantum_exp_ = round(log(max_value_quantized) / log(2.0));
int max_tobe_quantized_exp_ =
floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
CHECK_GE(max_quantum_exp_, max_tobe_quantized_exp_);
} else {
if (updated == 0) {
// normal situation (nothing quantized yet)
LOG_IF(INFO, portions_[0] != 0) << "Warning: nothing quantized yet, "
"portion should probably start with "
"0%%!";
max_quantum_exp_ =
floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
} else { // DNS model (max_value_quantized ==0 && update != 0)
max_quantum_exp_ =
floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
}
}
//
// if (portions[0] == 0) {
// CHECK_EQ(updated, 0) << updated
// << " updated values while there should be none!";
// max_quantum_exp_ =
// floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
// }
// else {
// max_quantum_exp_ = round(log(max_value_quantized) / log(2.0));
// int max_tobe_quantized_exp_ =
// floor(log(4.0 * max_value_tobe_quantized / 3.0) / log(2.0));
// CHECK_LE(max_tobe_quantized_exp_, max_quantum_exp_)
// << "New quantum exp is greater than the one already got!";
// }
//
min_quantum_exp_ = max_quantum_exp_ - num_quantum_values + 1;
std::cout << "Max_power = " << max_quantum_exp_ << std::endl;
std::cout << "Min_power = " << min_quantum_exp_ << std::endl;
for (unsigned int k = 0; k < num_quantum_values; ++k) {
quantum_values[k] = pow(2.0, max_quantum_exp_ - k);
quantum_values[2 * num_quantum_values - k] = -quantum_values[k];
}
quantum_values[num_quantum_values] = 0;
}
*/
/*
template <typename Dtype>
void INQConvolutionLayer<Dtype>::ShapeIntoTwoPower_gpu(
Blob<Dtype> *input_blob, Blob<Dtype> *mask_blob,
const vector<float> &portions, const int &max_quantum_exp_,
const int &min_quantum_exp_) {
const float previous_portion = portions[0];
const float current_portion = portions[1];
if (current_portion == 0) {
LOG_IF(INFO, Caffe::root_solver()) << "Current portion equals 0.0%, skipping ...";
return;
}
if ( max_quantum_exp_ == -100) {
LOG_IF(INFO, Caffe::root_solver()) << "All parameters already pruned away, skipping ...";
return;
}
// parameter statistics
const Dtype *param_cpu = input_blob->cpu_data();
Dtype *param = input_blob->mutable_gpu_data();
Dtype *mask = mask_blob->mutable_gpu_data();
int count = input_blob->count();
int num_not_yet_quantized = 0;
vector<Dtype> sorted_param;
for (int i = 0; i < count; ++i) {
if (mask[i] == 1) {
++num_not_yet_quantized;
sorted_param.push_back(fabs(param_cpu[i]));
}
}
// just an estimation
int num_init_not_quantized =
round(Dtype(num_not_yet_quantized) / (1.0 - previous_portion));
int num_not_tobe_quantized =
round(num_init_not_quantized * (1.0 - current_portion));
int num_tobe_update = num_not_yet_quantized - num_not_tobe_quantized;
LOG_IF(INFO, Caffe::root_solver()) << "portions: " << previous_portion * 100 <<"% -> "
<< current_portion * 100 << "% ("
<< "total: "
<< Dtype(count-num_not_yet_quantized)/count*100 << "% -> "
<< Dtype(count-num_not_tobe_quantized)/count*100<< "%"
<< ")";
LOG_IF(INFO, Caffe::root_solver()) << "init_not_quantized/total: "
<< num_init_not_quantized << "/"
<< count;
LOG_IF(INFO, Caffe::root_solver()) << "to_update/not_tobe_quantized/not_yet_quantized: "
<< num_tobe_update << "/"
<< num_not_tobe_quantized << "/"
<< num_not_yet_quantized ;
if (num_tobe_update > 0) {
sort(sorted_param.begin(), sorted_param.end());
Dtype threshold_ = sorted_param[num_not_tobe_quantized];
TPCalc<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, param, mask, threshold_, max_quantum_exp_, min_quantum_exp_);
CUDA_POST_KERNEL_CHECK;
// LOG_IF(INFO, Caffe::root_solver()) << "Shaping finished in INQ_conv... [gpu]";
}
}
*/
INSTANTIATE_LAYER_GPU_FUNCS(INQConvolutionLayer);
} // namespace caffe
|
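In the TPCalc kernel above, a weight beyond the threshold is mapped to the power of two 2^e with e = floor(log(4|w|/3)/log 2). The 4/3 factor makes 2^e the nearest power of two to |w|, since 2^e is nearest exactly when 0.75*2^e <= |w| < 1.5*2^e. A small host-side sketch of that rounding is below; the function name, the sample weights, and the exponent range passed in are illustrative.

#include <cmath>
#include <cstdio>

// Round w to the nearest power of two within [2^min_exp, 2^max_exp], or to zero
// when the nearest exponent falls below min_exp, mirroring the kernel's branches.
double quantize_to_power_of_two(double w, int min_exp, int max_exp)
{
    if (w == 0.0) return 0.0;
    double a = std::fabs(w);
    int e = (int)std::floor(std::log2(4.0 * a / 3.0));
    if (e > max_exp) e = max_exp;   // the kernel assumes this case cannot occur
    if (e < min_exp) return 0.0;    // too small: pruned to zero
    double q = std::ldexp(1.0, e);  // 2^e
    return (w < 0.0) ? -q : q;
}

int main()
{
    const double samples[] = {0.7, 0.13, -0.02, 0.4};
    for (double w : samples)
        std::printf("%+.3f -> %+.6f\n", w, quantize_to_power_of_two(w, -7, -1));
    return 0;
}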
1884809d8466e7b574a8c80e13fa2cd7c5beff55.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* Ported to PCL by Koen Buys : Attention Work in progress!
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "NCV.hpp"
#include "NCVAlg.hpp"
#include "NCVPyramid.hpp"
#include "NCVPixelOperations.hpp"
//#include "opencv2/gpu/device/common.hpp"
template<typename T, Ncv32u CN> struct __average4_CN {static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11);};
template<typename T> struct __average4_CN<T, 1> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
return out;
}};
template<> struct __average4_CN<float1, 1> {
static __host__ __device__ float1 _average4_CN(const float1 &p00, const float1 &p01, const float1 &p10, const float1 &p11)
{
float1 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
return out;
}};
template<> struct __average4_CN<double1, 1> {
static __host__ __device__ double1 _average4_CN(const double1 &p00, const double1 &p01, const double1 &p10, const double1 &p11)
{
double1 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
return out;
}};
template<typename T> struct __average4_CN<T, 3> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
out.y = ((Ncv32s)p00.y + p01.y + p10.y + p11.y + 2) / 4;
out.z = ((Ncv32s)p00.z + p01.z + p10.z + p11.z + 2) / 4;
return out;
}};
template<> struct __average4_CN<float3, 3> {
static __host__ __device__ float3 _average4_CN(const float3 &p00, const float3 &p01, const float3 &p10, const float3 &p11)
{
float3 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
return out;
}};
template<> struct __average4_CN<double3, 3> {
static __host__ __device__ double3 _average4_CN(const double3 &p00, const double3 &p01, const double3 &p10, const double3 &p11)
{
double3 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
return out;
}};
template<typename T> struct __average4_CN<T, 4> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
out.y = ((Ncv32s)p00.y + p01.y + p10.y + p11.y + 2) / 4;
out.z = ((Ncv32s)p00.z + p01.z + p10.z + p11.z + 2) / 4;
out.w = ((Ncv32s)p00.w + p01.w + p10.w + p11.w + 2) / 4;
return out;
}};
template<> struct __average4_CN<float4, 4> {
static __host__ __device__ float4 _average4_CN(const float4 &p00, const float4 &p01, const float4 &p10, const float4 &p11)
{
float4 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
out.w = (p00.w + p01.w + p10.w + p11.w) / 4;
return out;
}};
template<> struct __average4_CN<double4, 4> {
static __host__ __device__ double4 _average4_CN(const double4 &p00, const double4 &p01, const double4 &p10, const double4 &p11)
{
double4 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
out.w = (p00.w + p01.w + p10.w + p11.w) / 4;
return out;
}};
template<typename T> static __host__ __device__ T _average4(const T &p00, const T &p01, const T &p10, const T &p11)
{
return __average4_CN<T, NC(T)>::_average4_CN(p00, p01, p10, p11);
}
template<typename Tin, typename Tout, Ncv32u CN> struct __lerp_CN {static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d);};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 1> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
using TB = typename TConvVec2Base<Tout>::TBase;
return _pixMake(TB(b.x * d + a.x * (1 - d)));
}};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 3> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
using TB = typename TConvVec2Base<Tout>::TBase;
return _pixMake(TB(b.x * d + a.x * (1 - d)),
TB(b.y * d + a.y * (1 - d)),
TB(b.z * d + a.z * (1 - d)));
}};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 4> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
using TB = typename TConvVec2Base<Tout>::TBase;
return _pixMake(TB(b.x * d + a.x * (1 - d)),
TB(b.y * d + a.y * (1 - d)),
TB(b.z * d + a.z * (1 - d)),
TB(b.w * d + a.w * (1 - d)));
}};
template<typename Tin, typename Tout> static __host__ __device__ Tout _lerp(const Tin &a, const Tin &b, Ncv32f d)
{
return __lerp_CN<Tin, Tout, NC(Tin)>::_lerp_CN(a, b, d);
}
template<typename T>
__global__ void kernelDownsampleX2(T *d_src,
Ncv32u srcPitch,
T *d_dst,
Ncv32u dstPitch,
NcvSize32u dstRoi)
{
Ncv32u i = blockIdx.y * blockDim.y + threadIdx.y;
Ncv32u j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dstRoi.height && j < dstRoi.width)
{
T *d_src_line1 = (T *)((Ncv8u *)d_src + (2 * i + 0) * srcPitch);
T *d_src_line2 = (T *)((Ncv8u *)d_src + (2 * i + 1) * srcPitch);
T *d_dst_line = (T *)((Ncv8u *)d_dst + i * dstPitch);
T p00 = d_src_line1[2*j+0];
T p01 = d_src_line1[2*j+1];
T p10 = d_src_line2[2*j+0];
T p11 = d_src_line2[2*j+1];
d_dst_line[j] = _average4(p00, p01, p10, p11);
}
}
/*
namespace cv { namespace gpu { namespace device
{
namespace pyramid
{
template <typename T> void kernelDownsampleX2_gpu(DevMem2Db src, DevMem2Db dst, hipStream_t stream)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(src.cols, bDim.x), divUp(src.rows, bDim.y));
kernelDownsampleX2<<<gDim, bDim, 0, stream>>>((T*)src.data, src.step, (T*)dst.data, dst.step, NcvSize32u(dst.cols, dst.rows));
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void kernelDownsampleX2_gpu<uchar1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<ushort1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<float1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<float3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<float4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
}
}}} */
template<typename T>
__global__ void kernelInterpolateFrom1(T *d_srcTop,
Ncv32u srcTopPitch,
NcvSize32u szTopRoi,
T *d_dst,
Ncv32u dstPitch,
NcvSize32u dstRoi)
{
Ncv32u i = blockIdx.y * blockDim.y + threadIdx.y;
Ncv32u j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dstRoi.height && j < dstRoi.width)
{
Ncv32f ptTopX = 1.0f * (szTopRoi.width - 1) * j / (dstRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopRoi.height - 1) * i / (dstRoi.height - 1);
Ncv32u xl = (Ncv32u)ptTopX;
Ncv32u xh = xl+1;
Ncv32f dx = ptTopX - xl;
Ncv32u yl = (Ncv32u)ptTopY;
Ncv32u yh = yl+1;
Ncv32f dy = ptTopY - yl;
T *d_src_line1 = (T *)((Ncv8u *)d_srcTop + yl * srcTopPitch);
T *d_src_line2 = (T *)((Ncv8u *)d_srcTop + yh * srcTopPitch);
T *d_dst_line = (T *)((Ncv8u *)d_dst + i * dstPitch);
T p00, p01, p10, p11;
p00 = d_src_line1[xl];
p01 = xh < szTopRoi.width ? d_src_line1[xh] : p00;
p10 = yh < szTopRoi.height ? d_src_line2[xl] : p00;
p11 = (xh < szTopRoi.width && yh < szTopRoi.height) ? d_src_line2[xh] : p00;
using TVFlt = typename TConvBase2Vec<Ncv32f, NC(T)>::TVec;
TVFlt m_00_01 = _lerp<T, TVFlt>(p00, p01, dx);
TVFlt m_10_11 = _lerp<T, TVFlt>(p10, p11, dx);
TVFlt mixture = _lerp<TVFlt, TVFlt>(m_00_01, m_10_11, dy);
T outPix = _pixDemoteClampZ<TVFlt, T>(mixture);
d_dst_line[j] = outPix;
}
}
/*
namespace cv { namespace gpu { namespace device
{
namespace pyramid
{
template <typename T> void kernelInterpolateFrom1_gpu(DevMem2Db src, DevMem2Db dst, hipStream_t stream)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(dst.cols, bDim.x), divUp(dst.rows, bDim.y));
kernelInterpolateFrom1<<<gDim, bDim, 0, stream>>>((T*) src.data, src.step, NcvSize32u(src.cols, src.rows),
(T*) dst.data, dst.step, NcvSize32u(dst.cols, dst.rows));
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void kernelInterpolateFrom1_gpu<uchar1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<float1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<float3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<float4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
}
}}} */
#if 0 //def _WIN32
template<typename T>
static T _interpLinear(const T &a, const T &b, Ncv32f d)
{
using TVFlt = typename TConvBase2Vec<Ncv32f, NC(T)>::TVec;
TVFlt tmp = _lerp<T, TVFlt>(a, b, d);
return _pixDemoteClampZ<TVFlt, T>(tmp);
}
template<typename T>
static T _interpBilinear(const NCVMatrix<T> &refLayer, Ncv32f x, Ncv32f y)
{
Ncv32u xl = (Ncv32u)x;
Ncv32u xh = xl+1;
Ncv32f dx = x - xl;
Ncv32u yl = (Ncv32u)y;
Ncv32u yh = yl+1;
Ncv32f dy = y - yl;
T p00, p01, p10, p11;
p00 = refLayer.at(xl, yl);
p01 = xh < refLayer.width() ? refLayer.at(xh, yl) : p00;
p10 = yh < refLayer.height() ? refLayer.at(xl, yh) : p00;
p11 = (xh < refLayer.width() && yh < refLayer.height()) ? refLayer.at(xh, yh) : p00;
using TVFlt = typename TConvBase2Vec<Ncv32f, NC(T)>::TVec;
TVFlt m_00_01 = _lerp<T, TVFlt>(p00, p01, dx);
TVFlt m_10_11 = _lerp<T, TVFlt>(p10, p11, dx);
TVFlt mixture = _lerp<TVFlt, TVFlt>(m_00_01, m_10_11, dy);
return _pixDemoteClampZ<TVFlt, T>(mixture);
}
template <class T>
NCVImagePyramid<T>::NCVImagePyramid(const NCVMatrix<T> &img,
Ncv8u numLayers,
INCVMemAllocator &alloc,
hipStream_t cuStream)
{
this->_isInitialized = false;
ncvAssertPrintReturn(img.memType() == alloc.memType(), "NCVImagePyramid::ctor error", );
this->layer0 = &img;
NcvSize32u szLastLayer(img.width(), img.height());
this->nLayers = 1;
NCV_SET_SKIP_COND(alloc.isCounting());
NcvBool bDeviceCode = alloc.memType() == NCVMemoryTypeDevice;
if (numLayers == 0)
{
numLayers = 255; //it will cut-off when any of the dimensions goes 1
}
#ifdef SELF_CHECK_GPU
NCVMemNativeAllocator allocCPU(NCVMemoryTypeHostPinned, 512);
#endif
for (Ncv32u i=0; i<(Ncv32u)numLayers-1; i++)
{
NcvSize32u szCurLayer(szLastLayer.width / 2, szLastLayer.height / 2);
if (szCurLayer.width == 0 || szCurLayer.height == 0)
{
break;
}
this->pyramid.push_back(new NCVMatrixAlloc<T>(alloc, szCurLayer.width, szCurLayer.height));
ncvAssertPrintReturn(((NCVMatrixAlloc<T> *)(this->pyramid[i]))->isMemAllocated(), "NCVImagePyramid::ctor error", );
this->nLayers++;
//fill in the layer
NCV_SKIP_COND_BEGIN
const NCVMatrix<T> *prevLayer = i == 0 ? this->layer0 : this->pyramid[i-1];
NCVMatrix<T> *curLayer = this->pyramid[i];
if (bDeviceCode)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(szCurLayer.width, bDim.x), divUp(szCurLayer.height, bDim.y));
hipLaunchKernelGGL(( kernelDownsampleX2), dim3(gDim), dim3(bDim), 0, cuStream, prevLayer->ptr(),
prevLayer->pitch(),
curLayer->ptr(),
curLayer->pitch(),
szCurLayer);
ncvAssertPrintReturn(hipSuccess == hipGetLastError(), "NCVImagePyramid::ctor error", );
#ifdef SELF_CHECK_GPU
NCVMatrixAlloc<T> h_prevLayer(allocCPU, prevLayer->width(), prevLayer->height());
ncvAssertPrintReturn(h_prevLayer.isMemAllocated(), "Validation failure in NCVImagePyramid::ctor", );
NCVMatrixAlloc<T> h_curLayer(allocCPU, curLayer->width(), curLayer->height());
ncvAssertPrintReturn(h_curLayer.isMemAllocated(), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(NCV_SUCCESS == prevLayer->copy2D(h_prevLayer, prevLayer->size(), cuStream), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(NCV_SUCCESS == curLayer->copy2D(h_curLayer, curLayer->size(), cuStream), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(hipSuccess == hipStreamSynchronize(cuStream), "Validation failure in NCVImagePyramid::ctor", );
for (Ncv32u i=0; i<szCurLayer.height; i++)
{
for (Ncv32u j=0; j<szCurLayer.width; j++)
{
T p00 = h_prevLayer.at(2*j+0, 2*i+0);
T p01 = h_prevLayer.at(2*j+1, 2*i+0);
T p10 = h_prevLayer.at(2*j+0, 2*i+1);
T p11 = h_prevLayer.at(2*j+1, 2*i+1);
T outGold = _average4(p00, p01, p10, p11);
T outGPU = h_curLayer.at(j, i);
ncvAssertPrintReturn(0 == memcmp(&outGold, &outGPU, sizeof(T)), "Validation failure in NCVImagePyramid::ctor with kernelDownsampleX2", );
}
}
#endif
}
else
{
for (Ncv32u i=0; i<szCurLayer.height; i++)
{
for (Ncv32u j=0; j<szCurLayer.width; j++)
{
T p00 = prevLayer->at(2*j+0, 2*i+0);
T p01 = prevLayer->at(2*j+1, 2*i+0);
T p10 = prevLayer->at(2*j+0, 2*i+1);
T p11 = prevLayer->at(2*j+1, 2*i+1);
curLayer->at(j, i) = _average4(p00, p01, p10, p11);
}
}
}
NCV_SKIP_COND_END
szLastLayer = szCurLayer;
}
this->_isInitialized = true;
}
template <class T>
NCVImagePyramid<T>::~NCVImagePyramid()
{
}
template <class T>
NcvBool NCVImagePyramid<T>::isInitialized() const
{
return this->_isInitialized;
}
template <class T>
NCVStatus NCVImagePyramid<T>::getLayer(NCVMatrix<T> &outImg,
NcvSize32u outRoi,
NcvBool bTrilinear,
hipStream_t cuStream) const
{
ncvAssertReturn(this->isInitialized(), NCV_UNKNOWN_ERROR);
ncvAssertReturn(outImg.memType() == this->layer0->memType(), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(outRoi.width <= this->layer0->width() && outRoi.height <= this->layer0->height() &&
outRoi.width > 0 && outRoi.height > 0, NCV_DIMENSIONS_INVALID);
if (outRoi.width == this->layer0->width() && outRoi.height == this->layer0->height())
{
ncvAssertReturnNcvStat(this->layer0->copy2D(outImg, NcvSize32u(this->layer0->width(), this->layer0->height()), cuStream));
return NCV_SUCCESS;
}
Ncv32f lastScale = 1.0f;
Ncv32f curScale;
const NCVMatrix<T> *lastLayer = this->layer0;
const NCVMatrix<T> *curLayer = NULL;
NcvBool bUse2Refs = false;
for (Ncv32u i=0; i<this->nLayers-1; i++)
{
curScale = lastScale * 0.5f;
curLayer = this->pyramid[i];
if (outRoi.width == curLayer->width() && outRoi.height == curLayer->height())
{
ncvAssertReturnNcvStat(this->pyramid[i]->copy2D(outImg, NcvSize32u(this->pyramid[i]->width(), this->pyramid[i]->height()), cuStream));
return NCV_SUCCESS;
}
if (outRoi.width >= curLayer->width() && outRoi.height >= curLayer->height())
{
if (outRoi.width < lastLayer->width() && outRoi.height < lastLayer->height())
{
bUse2Refs = true;
}
break;
}
lastScale = curScale;
lastLayer = curLayer;
}
bUse2Refs = bUse2Refs && bTrilinear;
NCV_SET_SKIP_COND(outImg.memType() == NCVMemoryTypeNone);
NcvBool bDeviceCode = this->layer0->memType() == NCVMemoryTypeDevice;
#ifdef SELF_CHECK_GPU
NCVMemNativeAllocator allocCPU(NCVMemoryTypeHostPinned, 512);
#endif
NCV_SKIP_COND_BEGIN
if (bDeviceCode)
{
ncvAssertReturn(bUse2Refs == false, NCV_NOT_IMPLEMENTED);
dim3 bDim(16, 8);
dim3 gDim(divUp(outRoi.width, bDim.x), divUp(outRoi.height, bDim.y));
hipLaunchKernelGGL(( kernelInterpolateFrom1), dim3(gDim), dim3(bDim), 0, cuStream, lastLayer->ptr(),
lastLayer->pitch(),
lastLayer->size(),
outImg.ptr(),
outImg.pitch(),
outRoi);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
#ifdef SELF_CHECK_GPU
ncvSafeMatAlloc(h_lastLayer, T, allocCPU, lastLayer->width(), lastLayer->height(), NCV_ALLOCATOR_BAD_ALLOC);
ncvSafeMatAlloc(h_outImg, T, allocCPU, outImg.width(), outImg.height(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturnNcvStat(lastLayer->copy2D(h_lastLayer, lastLayer->size(), cuStream));
ncvAssertReturnNcvStat(outImg.copy2D(h_outImg, outRoi, cuStream));
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<outRoi.height; i++)
{
for (Ncv32u j=0; j<outRoi.width; j++)
{
NcvSize32u szTopLayer(lastLayer->width(), lastLayer->height());
Ncv32f ptTopX = 1.0f * (szTopLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopLayer.height - 1) * i / (outRoi.height - 1);
T outGold = _interpBilinear(h_lastLayer, ptTopX, ptTopY);
ncvAssertPrintReturn(0 == memcmp(&outGold, &h_outImg.at(j,i), sizeof(T)), "Validation failure in NCVImagePyramid::ctor with kernelInterpolateFrom1", NCV_UNKNOWN_ERROR);
}
}
#endif
}
else
{
for (Ncv32u i=0; i<outRoi.height; i++)
{
for (Ncv32u j=0; j<outRoi.width; j++)
{
//top layer pixel (always exists)
NcvSize32u szTopLayer(lastLayer->width(), lastLayer->height());
Ncv32f ptTopX = 1.0f * (szTopLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopLayer.height - 1) * i / (outRoi.height - 1);
T topPix = _interpBilinear(*lastLayer, ptTopX, ptTopY);
T trilinearPix = topPix;
if (bUse2Refs)
{
//bottom layer pixel (exists only if the requested scale is greater than the smallest layer scale)
NcvSize32u szBottomLayer(curLayer->width(), curLayer->height());
Ncv32f ptBottomX = 1.0f * (szBottomLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptBottomY = 1.0f * (szBottomLayer.height - 1) * i / (outRoi.height - 1);
T bottomPix = _interpBilinear(*curLayer, ptBottomX, ptBottomY);
Ncv32f scale = (1.0f * outRoi.width / layer0->width() + 1.0f * outRoi.height / layer0->height()) / 2;
Ncv32f dl = (scale - curScale) / (lastScale - curScale);
dl = CLAMP(dl, 0.0f, 1.0f);
trilinearPix = _interpLinear(bottomPix, topPix, dl);
}
outImg.at(j, i) = trilinearPix;
}
}
}
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
template class NCVImagePyramid<uchar1>;
template class NCVImagePyramid<uchar3>;
template class NCVImagePyramid<uchar4>;
template class NCVImagePyramid<ushort1>;
template class NCVImagePyramid<ushort3>;
template class NCVImagePyramid<ushort4>;
template class NCVImagePyramid<uint1>;
template class NCVImagePyramid<uint3>;
template class NCVImagePyramid<uint4>;
template class NCVImagePyramid<float1>;
template class NCVImagePyramid<float3>;
template class NCVImagePyramid<float4>;
#endif //_WIN32
| 1884809d8466e7b574a8c80e13fa2cd7c5beff55.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* Ported to PCL by Koen Buys : Attention Work in progress!
*/
#include <cuda_runtime.h>
#include <stdio.h>
#include "NCV.hpp"
#include "NCVAlg.hpp"
#include "NCVPyramid.hpp"
#include "NCVPixelOperations.hpp"
//#include "opencv2/gpu/device/common.hpp"
template<typename T, Ncv32u CN> struct __average4_CN {static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11);};
template<typename T> struct __average4_CN<T, 1> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
return out;
}};
template<> struct __average4_CN<float1, 1> {
static __host__ __device__ float1 _average4_CN(const float1 &p00, const float1 &p01, const float1 &p10, const float1 &p11)
{
float1 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
return out;
}};
template<> struct __average4_CN<double1, 1> {
static __host__ __device__ double1 _average4_CN(const double1 &p00, const double1 &p01, const double1 &p10, const double1 &p11)
{
double1 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
return out;
}};
template<typename T> struct __average4_CN<T, 3> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
out.y = ((Ncv32s)p00.y + p01.y + p10.y + p11.y + 2) / 4;
out.z = ((Ncv32s)p00.z + p01.z + p10.z + p11.z + 2) / 4;
return out;
}};
template<> struct __average4_CN<float3, 3> {
static __host__ __device__ float3 _average4_CN(const float3 &p00, const float3 &p01, const float3 &p10, const float3 &p11)
{
float3 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
return out;
}};
template<> struct __average4_CN<double3, 3> {
static __host__ __device__ double3 _average4_CN(const double3 &p00, const double3 &p01, const double3 &p10, const double3 &p11)
{
double3 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
return out;
}};
template<typename T> struct __average4_CN<T, 4> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
out.y = ((Ncv32s)p00.y + p01.y + p10.y + p11.y + 2) / 4;
out.z = ((Ncv32s)p00.z + p01.z + p10.z + p11.z + 2) / 4;
out.w = ((Ncv32s)p00.w + p01.w + p10.w + p11.w + 2) / 4;
return out;
}};
template<> struct __average4_CN<float4, 4> {
static __host__ __device__ float4 _average4_CN(const float4 &p00, const float4 &p01, const float4 &p10, const float4 &p11)
{
float4 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
out.w = (p00.w + p01.w + p10.w + p11.w) / 4;
return out;
}};
template<> struct __average4_CN<double4, 4> {
static __host__ __device__ double4 _average4_CN(const double4 &p00, const double4 &p01, const double4 &p10, const double4 &p11)
{
double4 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
out.w = (p00.w + p01.w + p10.w + p11.w) / 4;
return out;
}};
template<typename T> static __host__ __device__ T _average4(const T &p00, const T &p01, const T &p10, const T &p11)
{
return __average4_CN<T, NC(T)>::_average4_CN(p00, p01, p10, p11);
}
template<typename Tin, typename Tout, Ncv32u CN> struct __lerp_CN {static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d);};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 1> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
using TB = typename TConvVec2Base<Tout>::TBase;
return _pixMake(TB(b.x * d + a.x * (1 - d)));
}};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 3> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
using TB = typename TConvVec2Base<Tout>::TBase;
return _pixMake(TB(b.x * d + a.x * (1 - d)),
TB(b.y * d + a.y * (1 - d)),
TB(b.z * d + a.z * (1 - d)));
}};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 4> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
using TB = typename TConvVec2Base<Tout>::TBase;
return _pixMake(TB(b.x * d + a.x * (1 - d)),
TB(b.y * d + a.y * (1 - d)),
TB(b.z * d + a.z * (1 - d)),
TB(b.w * d + a.w * (1 - d)));
}};
template<typename Tin, typename Tout> static __host__ __device__ Tout _lerp(const Tin &a, const Tin &b, Ncv32f d)
{
return __lerp_CN<Tin, Tout, NC(Tin)>::_lerp_CN(a, b, d);
}
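// Each thread of kernelDownsampleX2 below produces one pixel of the next
// (half-size) pyramid layer by averaging the matching 2x2 block of the source
// layer, a plain box filter built on the channel-aware _average4 helper above.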
template<typename T>
__global__ void kernelDownsampleX2(T *d_src,
Ncv32u srcPitch,
T *d_dst,
Ncv32u dstPitch,
NcvSize32u dstRoi)
{
Ncv32u i = blockIdx.y * blockDim.y + threadIdx.y;
Ncv32u j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dstRoi.height && j < dstRoi.width)
{
T *d_src_line1 = (T *)((Ncv8u *)d_src + (2 * i + 0) * srcPitch);
T *d_src_line2 = (T *)((Ncv8u *)d_src + (2 * i + 1) * srcPitch);
T *d_dst_line = (T *)((Ncv8u *)d_dst + i * dstPitch);
T p00 = d_src_line1[2*j+0];
T p01 = d_src_line1[2*j+1];
T p10 = d_src_line2[2*j+0];
T p11 = d_src_line2[2*j+1];
d_dst_line[j] = _average4(p00, p01, p10, p11);
}
}
/*
namespace cv { namespace gpu { namespace device
{
namespace pyramid
{
template <typename T> void kernelDownsampleX2_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(src.cols, bDim.x), divUp(src.rows, bDim.y));
kernelDownsampleX2<<<gDim, bDim, 0, stream>>>((T*)src.data, src.step, (T*)dst.data, dst.step, NcvSize32u(dst.cols, dst.rows));
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void kernelDownsampleX2_gpu<uchar1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<ushort1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<float1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<float3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<float4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
}
}}} */
template<typename T>
__global__ void kernelInterpolateFrom1(T *d_srcTop,
Ncv32u srcTopPitch,
NcvSize32u szTopRoi,
T *d_dst,
Ncv32u dstPitch,
NcvSize32u dstRoi)
{
Ncv32u i = blockIdx.y * blockDim.y + threadIdx.y;
Ncv32u j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dstRoi.height && j < dstRoi.width)
{
Ncv32f ptTopX = 1.0f * (szTopRoi.width - 1) * j / (dstRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopRoi.height - 1) * i / (dstRoi.height - 1);
Ncv32u xl = (Ncv32u)ptTopX;
Ncv32u xh = xl+1;
Ncv32f dx = ptTopX - xl;
Ncv32u yl = (Ncv32u)ptTopY;
Ncv32u yh = yl+1;
Ncv32f dy = ptTopY - yl;
T *d_src_line1 = (T *)((Ncv8u *)d_srcTop + yl * srcTopPitch);
T *d_src_line2 = (T *)((Ncv8u *)d_srcTop + yh * srcTopPitch);
T *d_dst_line = (T *)((Ncv8u *)d_dst + i * dstPitch);
T p00, p01, p10, p11;
p00 = d_src_line1[xl];
p01 = xh < szTopRoi.width ? d_src_line1[xh] : p00;
p10 = yh < szTopRoi.height ? d_src_line2[xl] : p00;
p11 = (xh < szTopRoi.width && yh < szTopRoi.height) ? d_src_line2[xh] : p00;
using TVFlt = typename TConvBase2Vec<Ncv32f, NC(T)>::TVec;
TVFlt m_00_01 = _lerp<T, TVFlt>(p00, p01, dx);
TVFlt m_10_11 = _lerp<T, TVFlt>(p10, p11, dx);
TVFlt mixture = _lerp<TVFlt, TVFlt>(m_00_01, m_10_11, dy);
T outPix = _pixDemoteClampZ<TVFlt, T>(mixture);
d_dst_line[j] = outPix;
}
}
/*
namespace cv { namespace gpu { namespace device
{
namespace pyramid
{
template <typename T> void kernelInterpolateFrom1_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(dst.cols, bDim.x), divUp(dst.rows, bDim.y));
kernelInterpolateFrom1<<<gDim, bDim, 0, stream>>>((T*) src.data, src.step, NcvSize32u(src.cols, src.rows),
(T*) dst.data, dst.step, NcvSize32u(dst.cols, dst.rows));
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void kernelInterpolateFrom1_gpu<uchar1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<float1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<float3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<float4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
}
}}} */
#if 0 //def _WIN32
template<typename T>
static T _interpLinear(const T &a, const T &b, Ncv32f d)
{
using TVFlt = typename TConvBase2Vec<Ncv32f, NC(T)>::TVec;
TVFlt tmp = _lerp<T, TVFlt>(a, b, d);
return _pixDemoteClampZ<TVFlt, T>(tmp);
}
template<typename T>
static T _interpBilinear(const NCVMatrix<T> &refLayer, Ncv32f x, Ncv32f y)
{
Ncv32u xl = (Ncv32u)x;
Ncv32u xh = xl+1;
Ncv32f dx = x - xl;
Ncv32u yl = (Ncv32u)y;
Ncv32u yh = yl+1;
Ncv32f dy = y - yl;
T p00, p01, p10, p11;
p00 = refLayer.at(xl, yl);
p01 = xh < refLayer.width() ? refLayer.at(xh, yl) : p00;
p10 = yh < refLayer.height() ? refLayer.at(xl, yh) : p00;
p11 = (xh < refLayer.width() && yh < refLayer.height()) ? refLayer.at(xh, yh) : p00;
using TVFlt = typename TConvBase2Vec<Ncv32f, NC(T)>::TVec;
TVFlt m_00_01 = _lerp<T, TVFlt>(p00, p01, dx);
TVFlt m_10_11 = _lerp<T, TVFlt>(p10, p11, dx);
TVFlt mixture = _lerp<TVFlt, TVFlt>(m_00_01, m_10_11, dy);
return _pixDemoteClampZ<TVFlt, T>(mixture);
}
template <class T>
NCVImagePyramid<T>::NCVImagePyramid(const NCVMatrix<T> &img,
Ncv8u numLayers,
INCVMemAllocator &alloc,
cudaStream_t cuStream)
{
this->_isInitialized = false;
ncvAssertPrintReturn(img.memType() == alloc.memType(), "NCVImagePyramid::ctor error", );
this->layer0 = &img;
NcvSize32u szLastLayer(img.width(), img.height());
this->nLayers = 1;
NCV_SET_SKIP_COND(alloc.isCounting());
NcvBool bDeviceCode = alloc.memType() == NCVMemoryTypeDevice;
if (numLayers == 0)
{
numLayers = 255; //it will cut-off when any of the dimensions goes 1
}
#ifdef SELF_CHECK_GPU
NCVMemNativeAllocator allocCPU(NCVMemoryTypeHostPinned, 512);
#endif
for (Ncv32u i=0; i<(Ncv32u)numLayers-1; i++)
{
NcvSize32u szCurLayer(szLastLayer.width / 2, szLastLayer.height / 2);
if (szCurLayer.width == 0 || szCurLayer.height == 0)
{
break;
}
this->pyramid.push_back(new NCVMatrixAlloc<T>(alloc, szCurLayer.width, szCurLayer.height));
ncvAssertPrintReturn(((NCVMatrixAlloc<T> *)(this->pyramid[i]))->isMemAllocated(), "NCVImagePyramid::ctor error", );
this->nLayers++;
//fill in the layer
NCV_SKIP_COND_BEGIN
const NCVMatrix<T> *prevLayer = i == 0 ? this->layer0 : this->pyramid[i-1];
NCVMatrix<T> *curLayer = this->pyramid[i];
if (bDeviceCode)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(szCurLayer.width, bDim.x), divUp(szCurLayer.height, bDim.y));
kernelDownsampleX2<<<gDim, bDim, 0, cuStream>>>(prevLayer->ptr(),
prevLayer->pitch(),
curLayer->ptr(),
curLayer->pitch(),
szCurLayer);
ncvAssertPrintReturn(cudaSuccess == cudaGetLastError(), "NCVImagePyramid::ctor error", );
#ifdef SELF_CHECK_GPU
NCVMatrixAlloc<T> h_prevLayer(allocCPU, prevLayer->width(), prevLayer->height());
ncvAssertPrintReturn(h_prevLayer.isMemAllocated(), "Validation failure in NCVImagePyramid::ctor", );
NCVMatrixAlloc<T> h_curLayer(allocCPU, curLayer->width(), curLayer->height());
ncvAssertPrintReturn(h_curLayer.isMemAllocated(), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(NCV_SUCCESS == prevLayer->copy2D(h_prevLayer, prevLayer->size(), cuStream), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(NCV_SUCCESS == curLayer->copy2D(h_curLayer, curLayer->size(), cuStream), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(cudaSuccess == cudaStreamSynchronize(cuStream), "Validation failure in NCVImagePyramid::ctor", );
for (Ncv32u i=0; i<szCurLayer.height; i++)
{
for (Ncv32u j=0; j<szCurLayer.width; j++)
{
T p00 = h_prevLayer.at(2*j+0, 2*i+0);
T p01 = h_prevLayer.at(2*j+1, 2*i+0);
T p10 = h_prevLayer.at(2*j+0, 2*i+1);
T p11 = h_prevLayer.at(2*j+1, 2*i+1);
T outGold = _average4(p00, p01, p10, p11);
T outGPU = h_curLayer.at(j, i);
ncvAssertPrintReturn(0 == memcmp(&outGold, &outGPU, sizeof(T)), "Validation failure in NCVImagePyramid::ctor with kernelDownsampleX2", );
}
}
#endif
}
else
{
for (Ncv32u i=0; i<szCurLayer.height; i++)
{
for (Ncv32u j=0; j<szCurLayer.width; j++)
{
T p00 = prevLayer->at(2*j+0, 2*i+0);
T p01 = prevLayer->at(2*j+1, 2*i+0);
T p10 = prevLayer->at(2*j+0, 2*i+1);
T p11 = prevLayer->at(2*j+1, 2*i+1);
curLayer->at(j, i) = _average4(p00, p01, p10, p11);
}
}
}
NCV_SKIP_COND_END
szLastLayer = szCurLayer;
}
this->_isInitialized = true;
}
template <class T>
NCVImagePyramid<T>::~NCVImagePyramid()
{
}
template <class T>
NcvBool NCVImagePyramid<T>::isInitialized() const
{
return this->_isInitialized;
}
template <class T>
NCVStatus NCVImagePyramid<T>::getLayer(NCVMatrix<T> &outImg,
NcvSize32u outRoi,
NcvBool bTrilinear,
cudaStream_t cuStream) const
{
ncvAssertReturn(this->isInitialized(), NCV_UNKNOWN_ERROR);
ncvAssertReturn(outImg.memType() == this->layer0->memType(), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(outRoi.width <= this->layer0->width() && outRoi.height <= this->layer0->height() &&
outRoi.width > 0 && outRoi.height > 0, NCV_DIMENSIONS_INVALID);
if (outRoi.width == this->layer0->width() && outRoi.height == this->layer0->height())
{
ncvAssertReturnNcvStat(this->layer0->copy2D(outImg, NcvSize32u(this->layer0->width(), this->layer0->height()), cuStream));
return NCV_SUCCESS;
}
Ncv32f lastScale = 1.0f;
Ncv32f curScale;
const NCVMatrix<T> *lastLayer = this->layer0;
const NCVMatrix<T> *curLayer = NULL;
NcvBool bUse2Refs = false;
for (Ncv32u i=0; i<this->nLayers-1; i++)
{
curScale = lastScale * 0.5f;
curLayer = this->pyramid[i];
if (outRoi.width == curLayer->width() && outRoi.height == curLayer->height())
{
ncvAssertReturnNcvStat(this->pyramid[i]->copy2D(outImg, NcvSize32u(this->pyramid[i]->width(), this->pyramid[i]->height()), cuStream));
return NCV_SUCCESS;
}
if (outRoi.width >= curLayer->width() && outRoi.height >= curLayer->height())
{
if (outRoi.width < lastLayer->width() && outRoi.height < lastLayer->height())
{
bUse2Refs = true;
}
break;
}
lastScale = curScale;
lastLayer = curLayer;
}
bUse2Refs = bUse2Refs && bTrilinear;
NCV_SET_SKIP_COND(outImg.memType() == NCVMemoryTypeNone);
NcvBool bDeviceCode = this->layer0->memType() == NCVMemoryTypeDevice;
#ifdef SELF_CHECK_GPU
NCVMemNativeAllocator allocCPU(NCVMemoryTypeHostPinned, 512);
#endif
NCV_SKIP_COND_BEGIN
if (bDeviceCode)
{
ncvAssertReturn(bUse2Refs == false, NCV_NOT_IMPLEMENTED);
dim3 bDim(16, 8);
dim3 gDim(divUp(outRoi.width, bDim.x), divUp(outRoi.height, bDim.y));
kernelInterpolateFrom1<<<gDim, bDim, 0, cuStream>>>(lastLayer->ptr(),
lastLayer->pitch(),
lastLayer->size(),
outImg.ptr(),
outImg.pitch(),
outRoi);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
#ifdef SELF_CHECK_GPU
ncvSafeMatAlloc(h_lastLayer, T, allocCPU, lastLayer->width(), lastLayer->height(), NCV_ALLOCATOR_BAD_ALLOC);
ncvSafeMatAlloc(h_outImg, T, allocCPU, outImg.width(), outImg.height(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturnNcvStat(lastLayer->copy2D(h_lastLayer, lastLayer->size(), cuStream));
ncvAssertReturnNcvStat(outImg.copy2D(h_outImg, outRoi, cuStream));
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<outRoi.height; i++)
{
for (Ncv32u j=0; j<outRoi.width; j++)
{
NcvSize32u szTopLayer(lastLayer->width(), lastLayer->height());
Ncv32f ptTopX = 1.0f * (szTopLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopLayer.height - 1) * i / (outRoi.height - 1);
T outGold = _interpBilinear(h_lastLayer, ptTopX, ptTopY);
ncvAssertPrintReturn(0 == memcmp(&outGold, &h_outImg.at(j,i), sizeof(T)), "Validation failure in NCVImagePyramid::ctor with kernelInterpolateFrom1", NCV_UNKNOWN_ERROR);
}
}
#endif
}
else
{
for (Ncv32u i=0; i<outRoi.height; i++)
{
for (Ncv32u j=0; j<outRoi.width; j++)
{
//top layer pixel (always exists)
NcvSize32u szTopLayer(lastLayer->width(), lastLayer->height());
Ncv32f ptTopX = 1.0f * (szTopLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopLayer.height - 1) * i / (outRoi.height - 1);
T topPix = _interpBilinear(*lastLayer, ptTopX, ptTopY);
T trilinearPix = topPix;
if (bUse2Refs)
{
//bottom layer pixel (exists only if the requested scale is greater than the smallest layer scale)
NcvSize32u szBottomLayer(curLayer->width(), curLayer->height());
Ncv32f ptBottomX = 1.0f * (szBottomLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptBottomY = 1.0f * (szBottomLayer.height - 1) * i / (outRoi.height - 1);
T bottomPix = _interpBilinear(*curLayer, ptBottomX, ptBottomY);
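//blend the two bilinear samples according to where the requested scale falls
//between the two neighbouring pyramid levels (dl = 0 picks the bottom/coarser
//sample, dl = 1 the top/finer one)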
Ncv32f scale = (1.0f * outRoi.width / layer0->width() + 1.0f * outRoi.height / layer0->height()) / 2;
Ncv32f dl = (scale - curScale) / (lastScale - curScale);
dl = CLAMP(dl, 0.0f, 1.0f);
trilinearPix = _interpLinear(bottomPix, topPix, dl);
}
outImg.at(j, i) = trilinearPix;
}
}
}
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
template class NCVImagePyramid<uchar1>;
template class NCVImagePyramid<uchar3>;
template class NCVImagePyramid<uchar4>;
template class NCVImagePyramid<ushort1>;
template class NCVImagePyramid<ushort3>;
template class NCVImagePyramid<ushort4>;
template class NCVImagePyramid<uint1>;
template class NCVImagePyramid<uint3>;
template class NCVImagePyramid<uint4>;
template class NCVImagePyramid<float1>;
template class NCVImagePyramid<float3>;
template class NCVImagePyramid<float4>;
#endif //_WIN32
|
fc10df464d956fdff9862fbbfc68fb37a2c8dee2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <cstdlib>
#include <sys/time.h>
#include <iostream>
using namespace std;
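// Brute-force 0/1 knapsack: row i of the table built by T_binary is the binary
// representation of i, i.e. one of the 2^n possible item subsets.
// bin_multiplication expands each row into per-item weights/values, summing
// reduces every row to its total weight and value, zeroing discards rows whose
// weight exceeds the capacity W, and reduction_max/cycle_max keep the best value.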
__global__ void T_binary(int*bin_dev, int *_del) {
int bli = blockIdx.x * blockDim.x;
int idx = threadIdx.x;
bin_dev[bli + idx] = blockIdx.x / _del[idx] % 2;
}
__global__ void bin_multiplication(int *bin_dev, int* weight_dev, int *s_dev, int*values_dev)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
s_dev[i] = bin_dev[i] * values_dev[threadIdx.x];
bin_dev[i] = bin_dev[i] * weight_dev[threadIdx.x];
}
__global__ void summing(int* in_dev, int* sums) {
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = in_dev[i];
//__syncthreads();
//unrolled summing
#pragma unroll
for(ushort i = 1;i < 15;i++){
sdata[0]+=sdata[i];
}
__syncthreads();
// write result for this block to global mem
sums[blockIdx.x] = sdata[0];
}
__global__ void additional_summing(int *whatToAdd,int *whereToAdd){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
whereToAdd[i+32768] = whereToAdd[i]+whatToAdd[15];
}
__global__ void zeroing(int *w, int *s, int W) {
int bli = blockIdx.x * blockDim.x;
int idx = threadIdx.x;
if (w[bli+idx] > W) { s[bli + idx] = 0; w[bli + idx] = 0; }
}
__global__ void reduction_max(int* s) {
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = s[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1) {
if (tid < s) {
if (sdata[tid] < sdata[tid + s])
sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) s[blockIdx.x] = sdata[0];
}
__global__ void cycle_max(int *s, int N) {
//__shared__ int max[32];
unsigned int tid = threadIdx.x;
int off = N / 2;
if (tid < off) {
if (s[tid] < s[tid + off]) {
s[tid] = s[tid + off];
}
}
}
int main()
{
int W = 350;
cout<<W<<"\n";
int arraySize=1;
while(arraySize<17){
cout<<"Size is "<<arraySize;
//cout<<"Enter size of array (1-16): ";
//cin>>arraySize;
struct timeval t0,t1;
gettimeofday(&t0, NULL);
int totalSize = arraySize*pow(2,arraySize);//total number of elements to sum
int strSize_b = pow(2,arraySize);//number of strings in binary array and connected with him arrays
int flag=0;//flag is used if number of items equals to 16
if (arraySize>15){
strSize_b/=(pow(2,(arraySize-15)));//special features
flag=1;
}
int *Sum=new int[totalSize];
int *s;
int *bin_dev;
int *weight_dev;
int weight[16] = { 5,10,17,19,20, 23,26,30,32,38, 40,44,47,50,55,56 };// 55, 56, 60, 62, 66, 70 };
int values[16] = { 10,13,16,22,30, 25,55,90,110,115, 130,120,150,170,194,199 };// , 194, 199, 217, 230, 248 };
int *w;
int *values_dev;
int *del = new int[arraySize], *dev_del;
hipMalloc((void**)&dev_del, arraySize * sizeof(int));
for (int i = 0; i < arraySize; i++) {
del[i] = pow(2, i);//array of degrees of 2
}
hipMemcpy(dev_del, del, arraySize * sizeof(int), hipMemcpyHostToDevice);//copying array of degrees of 2 to global memory of gpu
hipMalloc((void**)&bin_dev, totalSize * sizeof(int));//allocation memory in global memory of GPU for binary array
int*s_dev;
hipMalloc((void**)&s_dev, totalSize * sizeof(int));//allocation memory in global memory of GPU for array of values multiplied to binary table
hipMalloc((void**)&weight_dev, arraySize * sizeof(int));//allocation memory in global memory of GPU for weights array
hipMalloc((void**)&s, totalSize * sizeof(int));//allocation memory in global memory of GPU array of sums of values
hipMalloc((void**)&values_dev, arraySize * sizeof(int));//allocation memory in global memory of GPU for values array
hipMalloc((void**)&w, totalSize * sizeof(int));//allocation memory in global memory of GPU for array of sums of weights
hipMemcpy(weight_dev, weight, arraySize * sizeof(int), hipMemcpyHostToDevice);//copying of array of weights to global memory of GPU
hipMemcpy(values_dev, values, arraySize * sizeof(int), hipMemcpyHostToDevice);//copying of array of values to global memory of GPU
//creating of binary table
T_binary << <strSize_b, arraySize >> > (bin_dev, dev_del);
//multiplication of weight and value parameters of each item on binary table strings
bin_multiplication << <strSize_b, arraySize >> > (bin_dev, weight_dev, s_dev, values_dev);
summing << <strSize_b, arraySize,arraySize*sizeof(int) >> > (bin_dev, w);
summing << <strSize_b, arraySize,arraySize*sizeof(int) >> > (s_dev, s);
//dances with tambourine
int a=totalSize/arraySize/1024;
int b = 1024;
if (a==0){
a=1;
b = pow(2,arraySize);}
//additional actions if flag==1
if(flag==1){
hipLaunchKernelGGL((additional_summing), dim3(a), dim3(b), 0, 0, weight_dev,w);
hipLaunchKernelGGL((additional_summing), dim3(a), dim3(b), 0, 0, values_dev,s);
}
//zeroing of unsuitable item's combinations
zeroing << <a, b >> > (w, s, W);
//finding maximal value for each block
reduction_max << <a,b,b*sizeof(int) >> > (s);
if(flag==1){
cycle_max << <2, 32 >> > (s,32);
}
//second step of finding maximal value
for (int i = a; i >= 1; i /= 2) {
cycle_max << <1, i >> > (s,i);
}
hipMemcpy(Sum, s, sizeof(int), hipMemcpyDeviceToHost);//copying maximal value back from GPU
cout <<"\n"<<"GPU max = " << Sum[0];
//memory freeing
hipFree(bin_dev);
hipFree(weight_dev);
hipFree(s);
hipFree(w);
hipFree(s_dev);
gettimeofday(&t1, NULL);//stop the GPU timer before computing elapsed time
long sec = (t1.tv_sec-t0.tv_sec);
long usec = t1.tv_usec-t0.tv_usec;
cout<<"GPU time is "<<sec<<","<<usec<<"\n";
//CPU version
gettimeofday(&t0, NULL);
//float fTimeStart = clock() / (float)(CLOCKS_PER_SEC);
if(flag==1){strSize_b*=2;}
//creating of binary array
int **bin = new int*[strSize_b];
for(int i=0;i<strSize_b;i++){
bin[i] = new int[arraySize];
}
int k = 0;
//filling of binary array
for (int i = 0; i < strSize_b; i++) {
k = i;
for (int j = 0; j <arraySize; j++) {
bin[i][j] = k % 2;
k /= 2;
}
}
//creating of arrays for multiplication of weights and values to binary array
int **prices = new int*[strSize_b];
int **weights = new int*[strSize_b];
for(int i = 0; i < strSize_b; i++){
prices[i] = new int[arraySize];
weights[i] = new int[arraySize];
}
//multiplication of weights and values to binary array
int *Sweig = new int[strSize_b];
int *Sval = new int[strSize_b];
for (int i = 0; i < strSize_b; i++) {
for (int j = 0; j < arraySize; j++) {
weights[i][j] = weight[j] * bin[i][j];
prices[i][j] = values[j] * bin[i][j];
}
}
//summing of arrays
for (int i = 0; i < strSize_b; i++) {
Sweig[i] = 0 ;Sval[i] = 0;
for (int j = 0; j < arraySize; j++) {
Sweig[i] += weights[i][j];
Sval[i] += prices[i][j];
}
}
//finding of maximal values
int max = 0; k = 0;
for (int i = 0; i < strSize_b; i++) {
if ((Sweig[i] <= W) && (Sval[i] > max)) {
//k = i;
max = Sval[i];
}
}
//float fTimeStop = clock() / (float)CLOCKS_PER_SEC;
cout << " CPU max = " << max << "\n";
//cout << "CPU time is " << (fTimeStop - fTimeStart) * 1000 << " milli-seconds\n";
//memory freeing
for(int i = 0; i < strSize_b; i++){
delete [] bin[i];
delete [] prices[i];
delete [] weights[i];
}
delete [] Sweig;
delete [] Sval;
gettimeofday(&t1, 0);
sec = (t1.tv_sec-t0.tv_sec);
usec = t1.tv_usec-t0.tv_usec;
cout<<"CPU time is "<<sec<<","<<usec<<"\n";
arraySize++;}
return 0;
}
| fc10df464d956fdff9862fbbfc68fb37a2c8dee2.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <cstdlib>
#include <sys/time.h>
#include <iostream>
using namespace std;
__global__ void T_binary(int*bin_dev, int *_del) {
int bli = blockIdx.x * blockDim.x;
int idx = threadIdx.x;
bin_dev[bli + idx] = blockIdx.x / _del[idx] % 2;
}
__global__ void bin_multiplication(int *bin_dev, int* weight_dev, int *s_dev, int*values_dev)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
s_dev[i] = bin_dev[i] * values_dev[threadIdx.x];
bin_dev[i] = bin_dev[i] * weight_dev[threadIdx.x];
}
__global__ void summing(int* in_dev, int* sums) {
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = in_dev[i];
//__syncthreads();
//unrolled summing
#pragma unroll
for(ushort i = 1;i < 15;i++){
sdata[0]+=sdata[i];
}
__syncthreads();
// write result for this block to global mem
sums[blockIdx.x] = sdata[0];
}
__global__ void additional_summing(int *whatToAdd,int *whereToAdd){
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
whereToAdd[i+32768] = whereToAdd[i]+whatToAdd[15];
}
__global__ void zeroing(int *w, int *s, int W) {
int bli = blockIdx.x * blockDim.x;
int idx = threadIdx.x;
if (w[bli+idx] > W) { s[bli + idx] = 0; w[bli + idx] = 0; }
}
__global__ void reduction_max(int* s) {
extern __shared__ int sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = s[i];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1) {
if (tid < s) {
if (sdata[tid] < sdata[tid + s])
sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) s[blockIdx.x] = sdata[0];
}
__global__ void cycle_max(int *s, int N) {
//__shared__ int max[32];
unsigned int tid = threadIdx.x;
int off = N / 2;
if (tid < off) {
if (s[tid] < s[tid + off]) {
s[tid] = s[tid + off];
}
}
}
int main()
{
int W = 350;
cout<<W<<"\n";
int arraySize=1;
while(arraySize<17){
cout<<"Size is "<<arraySize;
//cout<<"Enter size of array (1-16): ";
//cin>>arraySize;
struct timeval t0,t1;
gettimeofday(&t0, NULL);
int totalSize = arraySize*pow(2,arraySize);//total number of elements to sum
int strSize_b = pow(2,arraySize);//number of strings in binary array and connected with him arrays
int flag=0;//flag is used if number of items equals to 16
if (arraySize>15){
strSize_b/=(pow(2,(arraySize-15)));//special features
flag=1;
}
int *Sum=new int[totalSize];
int *s;
int *bin_dev;
int *weight_dev;
int weight[16] = { 5,10,17,19,20, 23,26,30,32,38, 40,44,47,50,55,56 };// 55, 56, 60, 62, 66, 70 };
int values[16] = { 10,13,16,22,30, 25,55,90,110,115, 130,120,150,170,194,199 };// , 194, 199, 217, 230, 248 };
int *w;
int *values_dev;
int *del = new int[arraySize], *dev_del;
cudaMalloc((void**)&dev_del, arraySize * sizeof(int));
for (int i = 0; i < arraySize; i++) {
del[i] = pow(2, i);//array of degrees of 2
}
cudaMemcpy(dev_del, del, arraySize * sizeof(int), cudaMemcpyHostToDevice);//copying array of degrees of 2 to global memory of gpu
cudaMalloc((void**)&bin_dev, totalSize * sizeof(int));//allocation memory in global memory of GPU for binary array
int*s_dev;
cudaMalloc((void**)&s_dev, totalSize * sizeof(int));//allocation memory in global memory of GPU for array of values multiplied to binary table
cudaMalloc((void**)&weight_dev, arraySize * sizeof(int));//allocation memory in global memory of GPU for weights array
cudaMalloc((void**)&s, totalSize * sizeof(int));//allocation memory in global memory of GPU array of sums of values
cudaMalloc((void**)&values_dev, arraySize * sizeof(int));//allocation memory in global memory of GPU for values array
cudaMalloc((void**)&w, totalSize * sizeof(int));//allocation memory in global memory of GPU for array of sums of weights
cudaMemcpy(weight_dev, weight, arraySize * sizeof(int), cudaMemcpyHostToDevice);//copying of array of weights to global memory of GPU
cudaMemcpy(values_dev, values, arraySize * sizeof(int), cudaMemcpyHostToDevice);//copying of array of values to global memory of GPU
//creating of binary table
T_binary << <strSize_b, arraySize >> > (bin_dev, dev_del);
//multiplication of weight and value parameters of each item on binary table strings
bin_multiplication << <strSize_b, arraySize >> > (bin_dev, weight_dev, s_dev, values_dev);
summing << <strSize_b, arraySize,arraySize*sizeof(int) >> > (bin_dev, w);
summing << <strSize_b, arraySize,arraySize*sizeof(int) >> > (s_dev, s);
//dances with tambourine
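//a = number of blocks, b = threads per block (one table row per thread),
//capped at 1024 threads; small inputs fall back to a single block of 2^n threads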
int a=totalSize/arraySize/1024;
int b = 1024;
if (a==0){
a=1;
b = pow(2,arraySize);}
//additional actions if flag==1
if(flag==1){
additional_summing<<<a, b>>>(weight_dev,w);
additional_summing<<<a, b>>>(values_dev,s);
}
//zeroing of unsuitable item's combinations
zeroing << <a, b >> > (w, s, W);
//finding maximal value for each block
reduction_max << <a,b,b*sizeof(int) >> > (s);
if(flag==1){
cycle_max << <2, 32 >> > (s,32);
}
//second step of finding maximal value
for (int i = a; i >= 1; i /= 2) {
cycle_max << <1, i >> > (s,i);
}
cudaMemcpy(Sum, s, sizeof(int), cudaMemcpyDeviceToHost);//copying maximal value back from GPU
cout <<"\n"<<"GPU max = " << Sum[0];
//memory freeing
cudaFree(bin_dev);
cudaFree(weight_dev);
cudaFree(s);
cudaFree(w);
cudaFree(s_dev);
gettimeofday(&t1, NULL);//stop the GPU timer before computing elapsed time
long sec = (t1.tv_sec-t0.tv_sec);
long usec = t1.tv_usec-t0.tv_usec;
cout<<"GPU time is "<<sec<<","<<usec<<"\n";
//CPU version
gettimeofday(&t0, NULL);
//float fTimeStart = clock() / (float)(CLOCKS_PER_SEC);
if(flag==1){strSize_b*=2;}
//creating of binary array
int **bin = new int*[strSize_b];
for(int i=0;i<strSize_b;i++){
bin[i] = new int[arraySize];
}
int k = 0;
//filling of binary array
for (int i = 0; i < strSize_b; i++) {
k = i;
for (int j = 0; j <arraySize; j++) {
bin[i][j] = k % 2;
k /= 2;
}
}
//creating of arrays for multiplication of weights and values to binary array
int **prices = new int*[strSize_b];
int **weights = new int*[strSize_b];
for(int i = 0; i < strSize_b; i++){
prices[i] = new int[arraySize];
weights[i] = new int[arraySize];
}
//multiplication of weights and values to binary array
int *Sweig = new int[strSize_b];
int *Sval = new int[strSize_b];
for (int i = 0; i < strSize_b; i++) {
for (int j = 0; j < arraySize; j++) {
weights[i][j] = weight[j] * bin[i][j];
prices[i][j] = values[j] * bin[i][j];
}
}
//summing of arrays
for (int i = 0; i < strSize_b; i++) {
Sweig[i] = 0 ;Sval[i] = 0;
for (int j = 0; j < arraySize; j++) {
Sweig[i] += weights[i][j];
Sval[i] += prices[i][j];
}
}
//finding of maximal values
int max = 0; k = 0;
for (int i = 0; i < strSize_b; i++) {
if ((Sweig[i] <= W) && (Sval[i] > max)) {
//k = i;
max = Sval[i];
}
}
//float fTimeStop = clock() / (float)CLOCKS_PER_SEC;
cout << " CPU max = " << max << "\n";
//cout << "CPU time is " << (fTimeStop - fTimeStart) * 1000 << " milli-seconds\n";
//memory freeing
for(int i = 0; i < strSize_b; i++){
delete [] bin[i];
delete [] prices[i];
delete [] weights[i];
}
delete [] Sweig;
delete [] Sval;
gettimeofday(&t1, 0);
sec = (t1.tv_sec-t0.tv_sec);
usec = t1.tv_usec-t0.tv_usec;
cout<<"CPU time is "<<sec<<","<<usec<<"\n";
arraySize++;}
return 0;
}
|
a97e9f0cbab15bdad6ac003a39955c03f475106b.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __GDD_GQD_INLINE_CU__
#define __GDD_GQD_INLINE_CU__
#define _QD_SPLITTER (134217729.0) // = 2^27 + 1
#define _QD_SPLIT_THRESH (6.6969287949141707e+299) // = 2^996
// For translate
union trans {
unsigned __int64 asInt64;
double asDouble;
};
// Basic functions =============================================================
// computes fl( a + b ) and err( a + b ), assumes |a| > |b|
__forceinline__ __device__
double quick_two_sum( double a, double b, double &err ) {
//double abs_a = fabs(a);
//double abs_b = fabs(b);
//if (!(abs_a > abs_b)) {
// double t = a;
// a = b;
// b = t;
//}
//assert(fabs(a) >= fabs(b));
//if(b == 0.0) {
// err = 0.0;
// return a;
//}
double s = a + b;
err = b - (s - a);
return s;
}
// Computes fl(a+b) and err(a+b).
__forceinline__ __device__
double two_sum( double a, double b, double &err ) {
//if( (a == 0.0) || (b == 0.0) ) {
// err = 0.0;
// return (a + b);
//}
double s = a + b;
double bb = s - a;
err = (a - (s - bb)) + (b - bb);
return s;
}
// Computes fl( a - b ) and err( a - b ), assumes |a| >= |b|
__forceinline__ __device__
double quick_two_diff( double a, double b, double &err ) {
//if (!(fabs(a) >= fabs(b))) {
// double t = a;
// a = b;
// b = t;
//}
//assert(fabs(a) >= fabs(b));
//if (a == b) {
// err = 0.0;
// return 0.0;
//}
double s = a - b;
err = (a - s) - b;
return s;
}
// Computes fl( a - b ) and err( a - b )
__forceinline__ __device__
double two_diff( double a, double b, double &err ) {
//if(a == b) {
// err = 0.0;
// return 0.0;
//}
double s = a - b;
double bb = s - a;
err = (a - (s - bb)) - (b + bb);
return s;
}
// Computes high word and lo word of a
#ifndef USE_FMA
__forceinline__ __device__
void split(double a, double &hi, double &lo) {
double temp;
if (a > _QD_SPLIT_THRESH || a < -_QD_SPLIT_THRESH)
{
a *= 3.7252902984619140625e-09; // 2^-28
temp = _QD_SPLITTER * a;
hi = temp - (temp - a);
lo = a - hi;
hi *= 268435456.0; // 2^28
lo *= 268435456.0; // 2^28
} else {
temp = _QD_SPLITTER * a;
hi = temp - (temp - a);
lo = a - hi;
}
}
#endif
// Computes fl(a*b) and err(a*b).
__forceinline__ __device__
double two_prod(double a, double b, double &err) {
#ifdef USE_FMA
double p = a * b;
err = fma(a, b, -p);
return p;
#else
double a_hi, a_lo, b_hi, b_lo;
double p = a * b;
split(a, a_hi, a_lo);
split(b, b_hi, b_lo);
//err = ((a_hi * b_hi - p) + a_hi * b_lo + a_lo * b_hi) + a_lo * b_lo;
err = (a_hi*b_hi) - p + (a_hi*b_lo) + (a_lo*b_hi) + (a_lo*b_lo);
return p;
#endif
}
// Computes fl(a*a) and err(a*a). Faster than calling two_prod(a, a, err).
__forceinline__ __device__
double two_sqr(double a, double &err) {
#ifdef USE_FMA
double p = a * a;
err = fma(a, a, -p);
return p;
#else
double hi, lo;
double q = a * a;
split(a, hi, lo);
err = ((hi * hi - q) + 2.0 * hi * lo) + lo * lo;
return q;
#endif
}
// Computes the nearest integer to d.
__forceinline__ __device__
double nint(double d) {
if (d == ::floor(d)){
return d;
}
return ::floor(d + 0.5);
}
__device__
bool is_positive(double a) {
const unsigned __int64 cons = 0x8000000000000000ULL;
trans t;
t.asDouble = a;
if (t.asInt64 == 0x7ff8000000000000ULL) return false;
bool result = ((t.asInt64 & cons) == 0);
return result;
}
__device__
bool is_negative(double a) {
const unsigned __int64 cons = 0x8000000000000000ULL;
trans t;
t.asDouble = a;
if (t.asInt64 == 0xfff8000000000000ULL) return false;
bool result = ((t.asInt64 & cons) == cons);
return result;
}
#endif /* __GDD_GQD_INLINE_CU__ */
| a97e9f0cbab15bdad6ac003a39955c03f475106b.cu | #ifndef __GDD_GQD_INLINE_CU__
#define __GDD_GQD_INLINE_CU__
#define _QD_SPLITTER (134217729.0) // = 2^27 + 1
#define _QD_SPLIT_THRESH (6.6969287949141707e+299) // = 2^996
// For translate
union trans {
unsigned __int64 asInt64;
double asDouble;
};
// Basic functions =============================================================
// computes fl( a + b ) and err( a + b ), assumes |a| > |b|
__forceinline__ __device__
double quick_two_sum( double a, double b, double &err ) {
//double abs_a = fabs(a);
//double abs_b = fabs(b);
//if (!(abs_a > abs_b)) {
// double t = a;
// a = b;
// b = t;
//}
//assert(fabs(a) >= fabs(b));
//if(b == 0.0) {
// err = 0.0;
// return a;
//}
double s = a + b;
err = b - (s - a);
return s;
}
// Computes fl(a+b) and err(a+b).
__forceinline__ __device__
double two_sum( double a, double b, double &err ) {
//if( (a == 0.0) || (b == 0.0) ) {
// err = 0.0;
// return (a + b);
//}
double s = a + b;
double bb = s - a;
err = (a - (s - bb)) + (b - bb);
return s;
}
// Computes fl( a - b ) and err( a - b ), assumes |a| >= |b|
__forceinline__ __device__
double quick_two_diff( double a, double b, double &err ) {
//if (!(fabs(a) >= fabs(b))) {
// double t = a;
// a = b;
// b = t;
//}
//assert(fabs(a) >= fabs(b));
//if (a == b) {
// err = 0.0;
// return 0.0;
//}
double s = a - b;
err = (a - s) - b;
return s;
}
// Computes fl( a - b ) and err( a - b )
__forceinline__ __device__
double two_diff( double a, double b, double &err ) {
//if(a == b) {
// err = 0.0;
// return 0.0;
//}
double s = a - b;
double bb = s - a;
err = (a - (s - bb)) - (b + bb);
return s;
}
// Computes high word and lo word of a
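// (Veltkamp/Dekker splitting: multiplying by _QD_SPLITTER = 2^27 + 1 breaks a
// double into a high and a low part, each small enough that their pairwise
// products are exactly representable; values above _QD_SPLIT_THRESH = 2^996 are
// pre-scaled by 2^-28 so the splitter multiplication cannot overflow.)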
#ifndef USE_FMA
__forceinline__ __device__
void split(double a, double &hi, double &lo) {
double temp;
if (a > _QD_SPLIT_THRESH || a < -_QD_SPLIT_THRESH)
{
a *= 3.7252902984619140625e-09; // 2^-28
temp = _QD_SPLITTER * a;
hi = temp - (temp - a);
lo = a - hi;
hi *= 268435456.0; // 2^28
lo *= 268435456.0; // 2^28
} else {
temp = _QD_SPLITTER * a;
hi = temp - (temp - a);
lo = a - hi;
}
}
#endif
// Computes fl(a*b) and err(a*b).
__forceinline__ __device__
double two_prod(double a, double b, double &err) {
#ifdef USE_FMA
double p = a * b;
err = fma(a, b, -p);
return p;
#else
double a_hi, a_lo, b_hi, b_lo;
double p = a * b;
split(a, a_hi, a_lo);
split(b, b_hi, b_lo);
//err = ((a_hi * b_hi - p) + a_hi * b_lo + a_lo * b_hi) + a_lo * b_lo;
err = (a_hi*b_hi) - p + (a_hi*b_lo) + (a_lo*b_hi) + (a_lo*b_lo);
return p;
#endif
}
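// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of the original file): the error-free
// primitives above are the building blocks of double-double arithmetic. The
// hypothetical helper below shows a "sloppy" double-double addition, with each
// value held as a double2 (x = high word, y = low word); the real gdd_real
// operators live elsewhere in the library.
__forceinline__ __device__
double2 dd_add_sketch(double2 a, double2 b) {
double e;
double s = two_sum(a.x, b.x, e); // exact sum of the high words plus rounding error
e += a.y + b.y; // fold in the low-order words
s = quick_two_sum(s, e, e); // renormalize so the result is a valid double-double
return make_double2(s, e);
}
// ---------------------------------------------------------------------------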
// Computes fl(a*a) and err(a*a). Faster than calling two_prod(a, a, err).
__forceinline__ __device__
double two_sqr(double a, double &err) {
#ifdef USE_FMA
double p = a * a;
err = fma(a, a, -p);
return p;
#else
double hi, lo;
double q = a * a;
split(a, hi, lo);
err = ((hi * hi - q) + 2.0 * hi * lo) + lo * lo;
return q;
#endif
}
// Computes the nearest integer to d.
__forceinline__ __device__
double nint(double d) {
if (d == std::floor(d)){
return d;
}
return std::floor(d + 0.5);
}
__device__
bool is_positive(double a) {
const unsigned __int64 cons = 0x8000000000000000ULL;
trans t;
t.asDouble = a;
if (t.asInt64 == 0x7ff8000000000000ULL) return false;
bool result = ((t.asInt64 & cons) == 0);
return result;
}
__device__
bool is_negative(double a) {
const unsigned __int64 cons = 0x8000000000000000ULL;
trans t;
t.asDouble = a;
if (t.asInt64 == 0xfff8000000000000ULL) return false;
bool result = ((t.asInt64 & cons) == cons);
return result;
}
#endif /* __GDD_GQD_INLINE_CU__ */
|
35910eee957e32424bae731997fab5d4522234ef.hip | // !!! This is a file automatically generated by hipify!!!
/*
device.cu
Contains the __device__ functions and __constant__s for CUDASieve
by Curtis Seizert <[email protected]>
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "CUDASieve/device.cuh"
/*
############################################
###### Bitmask arrays for small sieve ######
############################################
As far as I am aware, constant cache is the best place for these, although
the access pattern to them would not seem to be the best for this. Actually,
it should be more suited to texture memory, but I don't know how to use that.
In any event, the profiler says this is not a big deal.
*/
__constant__ uint32_t p3[3] = {0x92492492, 0x24924924, 0x49249249};
__constant__ uint32_t p5[5] = {0x08421084, 0x42108421, 0x10842108, 0x84210842,
0x21084210};
__constant__ uint32_t p7[7] = {0x81020408, 0x08102040, 0x40810204, 0x04081020,
0x20408102, 0x02040810, 0x10204081};
__constant__ uint32_t p11[11] = {0x08010020, 0x10020040, 0x20040080, 0x40080100,
0x80100200, 0x00200400, 0x00400801, 0x00801002,
0x01002004, 0x02004008, 0x04008010};
__constant__ uint32_t p13[13] = {0x00080040, 0x04002001, 0x00100080, 0x08004002,
0x00200100, 0x10008004, 0x00400200, 0x20010008,
0x00800400, 0x40020010, 0x01000800, 0x80040020,
0x02001000};
__constant__ uint32_t p17[17] = {0x02000100, 0x08000400, 0x20001000, 0x80004000,
0x00010000, 0x00040002, 0x00100008, 0x00400020,
0x01000080, 0x04000200, 0x10000800, 0x40002000,
0x00008000, 0x00020001, 0x00080004, 0x00200010,
0x00800040};
__constant__ uint32_t p19[19] = {0x10000200, 0x00008000, 0x00200004, 0x08000100,
0x00004000, 0x00100002, 0x04000080, 0x00002000,
0x00080001, 0x02000040, 0x80001000, 0x00040000,
0x01000020, 0x40000800, 0x00020000, 0x00800010,
0x20000400, 0x00010000, 0x00400008};
__constant__ uint32_t p23[23] = {0x00000800, 0x02000004, 0x00010000, 0x40000080,
0x00200000, 0x00001000, 0x04000008, 0x00020000,
0x80000100, 0x00400000, 0x00002000, 0x08000010,
0x00040000, 0x00000200, 0x00800001, 0x00004000,
0x10000020, 0x00080000, 0x00000400, 0x01000002,
0x00008000, 0x20000040, 0x00100000};
__constant__ uint32_t p29[29] = {0x00004000, 0x00000800, 0x00000100, 0x00000020,
0x80000004, 0x10000000, 0x02000000, 0x00400000,
0x00080000, 0x00010000, 0x00002000, 0x00000400,
0x00000080, 0x00000010, 0x40000002, 0x08000000,
0x01000000, 0x00200000, 0x00040000, 0x00008000,
0x00001000, 0x00000200, 0x00000040, 0x00000008,
0x20000001, 0x04000000, 0x00800000, 0x00100000,
0x00020000};
__constant__ uint32_t p31[31] = {0x00008000, 0x00004000, 0x00002000, 0x00001000,
0x00000800, 0x00000400, 0x00000200, 0x00000100,
0x00000080, 0x00000040, 0x00000020, 0x00000010,
0x00000008, 0x00000004, 0x00000002, 0x80000001,
0x40000000, 0x20000000, 0x10000000, 0x08000000,
0x04000000, 0x02000000, 0x01000000, 0x00800000,
0x00400000, 0x00200000, 0x00100000, 0x00080000,
0x00040000, 0x00020000, 0x00010000};
__constant__ uint32_t p37[37] = {0x00040000, 0x00800000, 0x10000000, 0x00000000,
0x00000002, 0x00000040, 0x00000800, 0x00010000,
0x00200000, 0x04000000, 0x80000000, 0x00000000,
0x00000010, 0x00000200, 0x00004000, 0x00080000,
0x01000000, 0x20000000, 0x00000000, 0x00000004,
0x00000080, 0x00001000, 0x00020000, 0x00400000,
0x08000000, 0x00000000, 0x00000001, 0x00000020,
0x00000400, 0x00008000, 0x00100000, 0x02000000,
0x40000000, 0x00000000, 0x00000008, 0x00000100,
0x00002000};
__constant__ uint8_t wheel30[8] = {1,7,11,13,17,19,23,29};
__constant__ uint8_t wheel30Inc[8] = {6,4,2,4,2,4,6,2};
__constant__ uint8_t lookup30[30] = {0,0,0,0,0,0,0,1,0,0,0,2,0,3,0,0,0,4,0,5,0,0,
0,6,0,0,0,0,0,7};
__constant__ uint16_t threads = 256;
/*
#############################################
###### Bitmask sieve for small primes #######
#############################################
This is an idea used in Ben Buhrow's implementation and it provides a considerable
(~4x) speedup vs. sieving these primes individually. For some reason, unrolling
this loop does not increase the speed, possibly due to divergence. CUDASieve has
a janky little c++ (utils/bitsievegen.cpp) program for generating such bitmasks and
outputting them to the standard out, because doing this by hand would be an onerous
task. This should allow anyone interested to try their own optimizations based
on chaning parameters of the sieve (size of words, etc.) without having to do this
part by hand.
*/
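// ---------------------------------------------------------------------------
// Illustrative sketch only (assumed to mirror what utils/bitsievegen.cpp does;
// that utility is not shown here): word w of the odds-only sieve covers the odd
// numbers 64*w + 2*bit + 1 for bit = 0..31, and that pattern modulo p repeats
// every p words, so p precomputed words are enough for prime p. A host-side
// generator for one of the tables above could look roughly like this:
inline void makeSmallPrimeBitmask(uint32_t p, uint32_t * masks) // masks holds p words
{
for (uint32_t w = 0; w < p; w++) { // one word per residue class of the word index
masks[w] = 0;
for (uint32_t bit = 0; bit < 32; bit++)
if ((64ull * w + 2 * bit + 1) % p == 0) // the odd number this bit represents
masks[w] |= 1u << bit;
}
}
// ---------------------------------------------------------------------------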
__device__ void device::sieveSmallPrimes(uint32_t * s_sieve, uint32_t sieveWords,
uint64_t bstart)
{
#pragma unroll 1
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads){
uint64_t j = i + bstart/64; // 64 is 32 bits per uint32_t*2(for only odds)
s_sieve[i] |= p3[j%3];
s_sieve[i] |= p5[j%5];
s_sieve[i] |= p7[j%7]; // sieving with 37 in this way provides a consistent
s_sieve[i] |= p11[j%11]; // 1-2% speedup over going up to only 31. Going
s_sieve[i] |= p13[j%13]; // higher than 37 slows things down. Using a premade
s_sieve[i] |= p17[j%17]; // wheel-type bitmask here is considerably slower
s_sieve[i] |= p19[j%19]; // than sieving with each small prime individually.
s_sieve[i] |= p23[j%23];
s_sieve[i] |= p29[j%29];
s_sieve[i] |= p31[j%31];
s_sieve[i] |= p37[j%37];
}
}
/*
######################################################
###### Specialized sieve for making primelist ########
######################################################
This sieve uses odds to cross off composites before a list of sieving primes is
created. For all 16 bit primes, this only amounts to the odds between 41 and 255
starting at their squares. While this would perhaps be more efficient using a
wheel, it is so fast anyway that who cares. The entire process of generating the
first list takes only 0.1 ms even on the relatively weak GTX 750.
*/
__device__ void device::sieveFirstBottom(uint32_t * s_sieve, uint32_t sieveBits)
{
if(threadIdx.x == 0 && blockIdx.x == 0) atomicOr(&s_sieve[0], 1u);
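// each thread takes one odd candidate p starting at 41; its first composite to
// mark is p*p, and the /2 converts that to a bit index in the odds-only sieve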
uint32_t p = 41 + 2*threadIdx.x;
uint32_t off = p * p/2;
for(; off < sieveBits; off += p) atomicOr(&s_sieve[off >> 5], (1u << (off & 31)));
}
/* #######################################################
###### Sieve functions for primes 37 < p < 2^20 #######
#######################################################
These functions are meant to do the majority of the crossing-off in large sieves
to exploit the better latency characteristics of shared vs. global memory. Their
calculation of the first number to mark off is based on modulo p, so it becomes
very inefficient for large prime numbers. However, storing buckets for such primes
is not feasible with this implementation because of the large number of blocks
active at one time, so the modulo calculation is still apparently the best way
to deal with these relatively small primes in order to cross off their multiples
in shared memory.
*/
__device__ void device::sieveMedPrimes(uint32_t * s_sieve, uint32_t * d_primeList,
uint64_t bstart, uint32_t primeListLength,
uint32_t sieveBits)
{
for(uint32_t pidx = threadIdx.x; pidx < primeListLength; pidx += threads){
// this accepts a list of sieving primes > 37
uint32_t p = d_primeList[pidx];
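// off = distance from bstart to the first multiple of p above it; if that
// multiple is even, step to the next (odd) one, since only odd numbers are stored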
uint32_t off = p - bstart % p;
if(off%2==0) off += p;
off = off >> 1; // convert offset to align with half sieve
for(; off < sieveBits; off += p) atomicOr(&s_sieve[off >> 5], (1u << (off & 31)));
// this loop takes ~75% of the kernel time
}
}
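/*
 Worked example of the offset computation above (illustrative numbers): with
 bstart = 128 and p = 41, off = 41 - 128 % 41 = 36 and 128 + 36 = 164 is the next
 multiple of 41, but it is even, so off += p gives 77 and the first odd multiple is
 128 + 77 = 205 = 5*41. After off = off >> 1 the bit index is 38, and bit 38 of the
 half sieve indeed represents 128 + 2*38 + 1 = 205.
*/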
__device__ void device::sieveMedPrimesBase(uint32_t * s_sieve, uint32_t * d_primeList,
uint64_t bstart, uint32_t primeListLength,
uint32_t sieveBits, bool forPrimeList = 0)
{
if(threadIdx.x == 0){
if(forPrimeList) s_sieve[0] |= 1; // cross off one
else s_sieve[0] ^= 0x0004cb6e; // un-cross off 3-37 if regular sieve
}
for(uint32_t pidx = threadIdx.x; pidx < primeListLength; pidx += threads){
// this accepts a list of sieving primes > 37
uint32_t p = d_primeList[pidx];
uint32_t off = p*p/2; // convert offset to align with half sieve
for(; off < sieveBits; off += p) atomicOr(&s_sieve[off >> 5], (1u << (off & 31)));
}
}
/* ##################################################
##### Functions to zero or load SMem sieves ######
##################################################
*/
__device__ void device::sieveInit(uint32_t * s_sieve, uint32_t sieveWords)
{
//#pragma unroll
for(uint16_t i = threadIdx.x; i < sieveWords; i += blockDim.x)
s_sieve[i] ^= s_sieve[i];
}
__device__ void device::sieveInit(uint32_t * s_sieve, uint32_t * d_bigSieve,
uint32_t sieveWords)
{
uint32_t blockStart = sieveWords*blockIdx.x;
for(uint32_t i = threadIdx.x; i < sieveWords; i += threads){
s_sieve[i] = d_bigSieve[blockStart+i];
}
}
/* ##################################
###### Counting functions #######
##################################
Note: some of these (as indicated) destroy the sieve data, and they are differentiated
from those that don't by overloading.
*/
// retains the original sieve data, reduces to block size
__device__ void device::countPrimes(uint32_t * s_sieve, uint16_t * s_counts,
uint32_t sieveWords)
{
uint16_t count = 0;
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads)
{
uint32_t s = ~s_sieve[i];
count += __popc(s);
}
__syncthreads();
s_counts[threadIdx.x] = count;
}
// retains the original sieve data, maintains primes per word
__device__ void device::countPrimesHist(uint32_t * s_sieve, uint32_t * s_counts,
uint32_t sieveWords)
{
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads)
s_counts[i] = __popc(~s_sieve[i]);
__syncthreads();
}
// destroys the original sieve data, maintains primes per word
__device__ void device::countPrimesHist(uint32_t * s_sieve, uint32_t sieveWords,
uint64_t bstart, uint64_t maxPrime)
{
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads)
{
uint32_t s = ~s_sieve[i];
uint16_t count = 0;
for(uint16_t j = 0; j < 32; j++){
if(1 & (s >> j)){
uint64_t p = bstart + 64*i + 2*j + 1;
if(p <= maxPrime) count++; // only count primes less than top
}
}
s_sieve[i] = count;
}
__syncthreads();
}
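/*
 Decoding illustration for the formula above (illustrative numbers): with
 bstart = 256, word i = 2 and bit j = 3, p = 256 + 64*2 + 2*3 + 1 = 391, i.e. each
 32-bit word covers a span of 64 integers and each bit stands for one odd number
 in that span.
*/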
// destroys original sieve data
__device__ void device::countPrimes(uint32_t * s_sieve, uint32_t sieveWords)
{
uint16_t count = 0;
#pragma unroll
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads)
{
uint32_t s = ~s_sieve[i];
s_sieve[i] ^= s_sieve[i];
count += __popc(s);
}
__syncthreads();
s_sieve[threadIdx.x] = count;
}
__device__ void device::countTopPrimes(uint32_t * s_sieve, uint32_t sieveWords,
uint64_t bstart, uint64_t top)
{
uint32_t count = 0;
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads){
uint32_t s = ~s_sieve[i];
s_sieve[i] ^= s_sieve[i]; // to make a number that can't be the result in
// order to see if it has been modified later
for(uint16_t j = 0; j < 32; j++){
if(1 & (s >> j)){
uint64_t p = bstart + 64*i + 2*j + 1;
if(p <= top) count++; // only count primes less than top
}
}
}
s_sieve[threadIdx.x] = count;
__syncthreads();
}
/*
##########################################################################
###### Functions for moving the count or sieve out of shared memory ######
##########################################################################
*/
__device__ void device::moveCount(uint32_t * s_sieve, volatile uint64_t * d_count, bool isTop)
{
if(threadIdx.x == 0)
{
uint64_t count = 0;
for(uint16_t i=0; i < threads; i++) count += s_sieve[i];
if(isTop) atomicAdd((unsigned long long *)d_count, count);
else atomicAdd((unsigned long long *)d_count, (int) -count);
}
__syncthreads();
}
__device__ void device::moveCountHist(uint32_t * s_sieve, uint32_t * d_histogram)
{
if(threadIdx.x == 0)
{
uint64_t count = 0;
for(uint16_t i=0; i < threads; i++)
count += s_sieve[i];
d_histogram[blockIdx.x] = count;
}
__syncthreads();
}
__device__ void device::makeBigSieve(uint32_t * bigSieve, uint32_t * s_sieve,
uint32_t sieveWords)
{
uint32_t blockStart = sieveWords*blockIdx.x;
for(uint32_t i = threadIdx.x; i < sieveWords; i += threads)
atomicOr(&bigSieve[i+blockStart], s_sieve[i]);
}
/* ##################################################################
###### Functions for generating the list of sieving primes #######
##################################################################
*/
__device__ void device::inclusiveScan(uint32_t * s_array, uint32_t size)
{
uint32_t tidx = threadIdx.x;
uint32_t sum;
for(uint32_t offset = 1; offset <= size/2; offset *= 2){
if(tidx >= offset){
sum = s_array[threadIdx.x] + s_array[threadIdx.x - offset];
}else{sum = s_array[threadIdx.x];}
__syncthreads();
s_array[threadIdx.x] = sum;
__syncthreads();
}
}
// 16 bit data type
__device__ void device::exclusiveScan(uint16_t * s_array, uint32_t size)
{
uint32_t tidx = threadIdx.x;
uint32_t sum;
for(uint32_t offset = 1; offset <= size/2; offset *= 2){
if(tidx >= offset){
sum = s_array[tidx] + s_array[tidx - offset];
}else{sum = s_array[tidx];}
__syncthreads();
s_array[tidx] = sum;
__syncthreads();
}
if(threadIdx.x != 0) sum = s_array[threadIdx.x-1];
else sum = 0;
__syncthreads();
s_array[threadIdx.x] = sum;
}
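/*
 Illustrative trace of the scan above: per-thread counts {3,1,4,2} become
 {3,4,8,10} after the inclusive (Hillis-Steele) passes and {0,3,4,8} after the
 final shift, i.e. each thread ends up with the running total of the counts before
 it -- the kind of per-thread write offset that the movePrimes functions below read
 from s_counts.
*/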
/*
Exclusive scan function suitable for medium sized lists; part of its operation
is serialized to avoid needing multiple stages whenever the number of items
to be incremented is greater than the number of threads
*/
__device__ void device::exclusiveScanBig(uint32_t * s_array, uint32_t size)
{
uint32_t tidx = threadIdx.x;
uint32_t sum;
for(uint32_t offset = 1; offset <= size/2; offset *= 2){
for(int32_t i = size- 1 - tidx; i >= 0; i -= threads){
if(i >= offset){
sum = s_array[i] + s_array[i - offset];
}else{sum = s_array[i];}
__syncthreads();
s_array[i] = sum;
__syncthreads();
}
}
for(int32_t i = size - 1 - threadIdx.x; i >= 0; i -= threads){
if (i > 0) sum = s_array[i-1];
else sum = 0;
__syncthreads();
s_array[i] = sum;
__syncthreads();
}
}
template <typename T>
__device__ void device::movePrimes(uint32_t * s_sieve, uint16_t * s_counts,
uint32_t sieveWords, T * d_primeOut,
uint32_t * d_histogram, uint64_t bstart, T maxPrime)
{
// this is meant for when words per array == number of threads
uint16_t i = threadIdx.x;
uint16_t c = 0; // used to hold the count
uint32_t s = ~s_sieve[i]; // primes are now represented as 1s
// offset for where each thread should put its first prime
uint32_t idx = d_histogram[blockIdx.x] + s_counts[i];
__syncthreads();
// s_sieve[0] is made ~0 so we can tell if it has been changed
if(threadIdx.x == 0) s_sieve[0] |= ~s_sieve[0];
for(uint16_t j = 0; j < 32; j++){
if(1 & (s >> j)){ // if prime
T p = bstart + 64*i + 2*j + 1; // calculate value
// if value is above threshold, submit and break
if(p > maxPrime) {atomicMin(&s_sieve[0], idx+c); break;}
else d_primeOut[idx+c] = p; // otherwise copy p to the output array
c++; // increment count
}
}
__syncthreads();
if(threadIdx.x == blockDim.x-1){
if(~s_sieve[0] != 0) d_histogram[blockIdx.x] = s_sieve[0];
else d_histogram[blockIdx.x] = idx + c;
}
// this covers up one since the sieve only holds odds
if(threadIdx.x == 1 && bstart == 0) d_primeOut[0] = 2;
}
/*
This is the version of the movePrimes function that is used for generating the original
list of sieving primes to be used by the next round of list generating functions. Unlike
the above versions of the function, it supports array sizes greater than the number of
threads in the block. I could probably get rid of one of the above.
*/
__device__ void device::movePrimesFirst(uint32_t * s_sieve, uint32_t * s_counts,
uint32_t sieveWords, uint32_t * d_primeList,
volatile uint64_t * d_count, uint64_t bstart,
uint32_t maxPrime)
{
// this is for when words per array != number of threads
uint16_t c;
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads){
c = s_counts[i];
uint32_t s = ~s_sieve[i];
__syncthreads();
if(i == 0) s_sieve[0] |= ~s_sieve[0];
for(uint16_t j = 0; j < 32; j++){
if(1 & (s >> j)){
uint32_t p = bstart + 64*i + 2*j + 1;
if(p > maxPrime) atomicMin(&s_sieve[0], c);
else d_primeList[c] = p;
c++;
}
}
}
__syncthreads();
if(threadIdx.x == 0 && ~s_sieve[0] != 0) atomicAdd((unsigned long long *)d_count, s_sieve[0] );
if((threadIdx.x == blockDim.x - 1) && ~s_sieve[0] == 0) atomicAdd((unsigned long long *)d_count, c);
}
| 35910eee957e32424bae731997fab5d4522234ef.cu | /*
device.cu
Contains the __device__ functions and __constant__s for CUDASieve
by Curtis Seizert <[email protected]>
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <math_functions.h>
#include "CUDASieve/device.cuh"
/*
############################################
###### Bitmask arrays for small sieve ######
############################################
As far as I am aware, constant cache is the best place for these, although
the access pattern to them would not seem to be the best for this. Actually,
it should be more suited to texture memory, but I don't know how to use that.
In any event, the profiler says this is not a big deal.
*/
__constant__ uint32_t p3[3] = {0x92492492, 0x24924924, 0x49249249};
__constant__ uint32_t p5[5] = {0x08421084, 0x42108421, 0x10842108, 0x84210842,
0x21084210};
__constant__ uint32_t p7[7] = {0x81020408, 0x08102040, 0x40810204, 0x04081020,
0x20408102, 0x02040810, 0x10204081};
__constant__ uint32_t p11[11] = {0x08010020, 0x10020040, 0x20040080, 0x40080100,
0x80100200, 0x00200400, 0x00400801, 0x00801002,
0x01002004, 0x02004008, 0x04008010};
__constant__ uint32_t p13[13] = {0x00080040, 0x04002001, 0x00100080, 0x08004002,
0x00200100, 0x10008004, 0x00400200, 0x20010008,
0x00800400, 0x40020010, 0x01000800, 0x80040020,
0x02001000};
__constant__ uint32_t p17[17] = {0x02000100, 0x08000400, 0x20001000, 0x80004000,
0x00010000, 0x00040002, 0x00100008, 0x00400020,
0x01000080, 0x04000200, 0x10000800, 0x40002000,
0x00008000, 0x00020001, 0x00080004, 0x00200010,
0x00800040};
__constant__ uint32_t p19[19] = {0x10000200, 0x00008000, 0x00200004, 0x08000100,
0x00004000, 0x00100002, 0x04000080, 0x00002000,
0x00080001, 0x02000040, 0x80001000, 0x00040000,
0x01000020, 0x40000800, 0x00020000, 0x00800010,
0x20000400, 0x00010000, 0x00400008};
__constant__ uint32_t p23[23] = {0x00000800, 0x02000004, 0x00010000, 0x40000080,
0x00200000, 0x00001000, 0x04000008, 0x00020000,
0x80000100, 0x00400000, 0x00002000, 0x08000010,
0x00040000, 0x00000200, 0x00800001, 0x00004000,
0x10000020, 0x00080000, 0x00000400, 0x01000002,
0x00008000, 0x20000040, 0x00100000};
__constant__ uint32_t p29[29] = {0x00004000, 0x00000800, 0x00000100, 0x00000020,
0x80000004, 0x10000000, 0x02000000, 0x00400000,
0x00080000, 0x00010000, 0x00002000, 0x00000400,
0x00000080, 0x00000010, 0x40000002, 0x08000000,
0x01000000, 0x00200000, 0x00040000, 0x00008000,
0x00001000, 0x00000200, 0x00000040, 0x00000008,
0x20000001, 0x04000000, 0x00800000, 0x00100000,
0x00020000};
__constant__ uint32_t p31[31] = {0x00008000, 0x00004000, 0x00002000, 0x00001000,
0x00000800, 0x00000400, 0x00000200, 0x00000100,
0x00000080, 0x00000040, 0x00000020, 0x00000010,
0x00000008, 0x00000004, 0x00000002, 0x80000001,
0x40000000, 0x20000000, 0x10000000, 0x08000000,
0x04000000, 0x02000000, 0x01000000, 0x00800000,
0x00400000, 0x00200000, 0x00100000, 0x00080000,
0x00040000, 0x00020000, 0x00010000};
__constant__ uint32_t p37[37] = {0x00040000, 0x00800000, 0x10000000, 0x00000000,
0x00000002, 0x00000040, 0x00000800, 0x00010000,
0x00200000, 0x04000000, 0x80000000, 0x00000000,
0x00000010, 0x00000200, 0x00004000, 0x00080000,
0x01000000, 0x20000000, 0x00000000, 0x00000004,
0x00000080, 0x00001000, 0x00020000, 0x00400000,
0x08000000, 0x00000000, 0x00000001, 0x00000020,
0x00000400, 0x00008000, 0x00100000, 0x02000000,
0x40000000, 0x00000000, 0x00000008, 0x00000100,
0x00002000};
__constant__ uint8_t wheel30[8] = {1,7,11,13,17,19,23,29};
__constant__ uint8_t wheel30Inc[8] = {6,4,2,4,2,4,6,2};
__constant__ uint8_t lookup30[30] = {0,0,0,0,0,0,0,1,0,0,0,2,0,3,0,0,0,4,0,5,0,0,
0,6,0,0,0,0,0,7};
__constant__ uint16_t threads = 256;
/*
#############################################
###### Bitmask sieve for small primes #######
#############################################
This is an idea used in Ben Buhrow's implementation and it provides a considerable
(~4x) speedup vs. sieving these primes individually. For some reason, unrolling
this loop does not increase the speed, possibly due to divergence. CUDASieve has
a janky little c++ (utils/bitsievegen.cpp) program for generating such bitmasks and
outputting them to standard output, because doing this by hand would be an onerous
task. This should allow anyone interested to try their own optimizations based
on changing parameters of the sieve (size of words, etc.) without having to do this
part by hand.
*/
__device__ void device::sieveSmallPrimes(uint32_t * s_sieve, uint32_t sieveWords,
uint64_t bstart)
{
#pragma unroll 1
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads){
uint64_t j = i + bstart/64; // 64 is 32 bits per uint32_t*2(for only odds)
s_sieve[i] |= p3[j%3];
s_sieve[i] |= p5[j%5];
s_sieve[i] |= p7[j%7]; // sieving with 37 in this way provides a consistent
s_sieve[i] |= p11[j%11]; // 1-2% speedup over going up to only 31. Going
s_sieve[i] |= p13[j%13]; // higher than 37 slows things down. Using a premade
s_sieve[i] |= p17[j%17]; // wheel-type bitmask here is considerably slower
s_sieve[i] |= p19[j%19]; // than sieving with each small prime individually.
s_sieve[i] |= p23[j%23];
s_sieve[i] |= p29[j%29];
s_sieve[i] |= p31[j%31];
s_sieve[i] |= p37[j%37];
}
}
/*
######################################################
###### Specialized sieve for making primelist ########
######################################################
This sieve uses odds to cross off composites before a list of sieving primes is
created. For all 16 bit primes, this only amounts to the odds between 41 and 255
starting at their squares. While this would perhaps be more efficient using a
wheel, it is so fast anyway that who cares. The entire process of generating the
first list takes only 0.1 ms even on the relatively weak GTX 750.
*/
__device__ void device::sieveFirstBottom(uint32_t * s_sieve, uint32_t sieveBits)
{
if(threadIdx.x == 0 && blockIdx.x == 0) atomicOr(&s_sieve[0], 1u);
uint32_t p = 41 + 2*threadIdx.x;
uint32_t off = p * p/2;
for(; off < sieveBits; off += p) atomicOr(&s_sieve[off >> 5], (1u << (off & 31)));
}
/* #######################################################
###### Sieve functions for primes 37 < p < 2^20 #######
#######################################################
These functions are meant to do the majority of the crossing-off in large sieves
to exploit the better latency characteristics of shared vs. global memory. Their
calculation of the first number to mark off is based on modulo p, so it becomes
very inefficient for large prime numbers. However, storing buckets for such primes
is not feasible with this implementation because of the large number of blocks
active at one time, so the modulo calculation is still apparently the best way
to deal with these relatively small primes in order to cross off their multiples
in shared memory.
*/
__device__ void device::sieveMedPrimes(uint32_t * s_sieve, uint32_t * d_primeList,
uint64_t bstart, uint32_t primeListLength,
uint32_t sieveBits)
{
for(uint32_t pidx = threadIdx.x; pidx < primeListLength; pidx += threads){
// this accepts a list of sieving primes > 37
uint32_t p = d_primeList[pidx];
uint32_t off = p - bstart % p;
if(off%2==0) off += p;
off = off >> 1; // convert offset to align with half sieve
for(; off < sieveBits; off += p) atomicOr(&s_sieve[off >> 5], (1u << (off & 31)));
// this loop takes ~75% of the kernel time
}
}
__device__ void device::sieveMedPrimesBase(uint32_t * s_sieve, uint32_t * d_primeList,
uint64_t bstart, uint32_t primeListLength,
uint32_t sieveBits, bool forPrimeList = 0)
{
if(threadIdx.x == 0){
if(forPrimeList) s_sieve[0] |= 1; // cross off one
else s_sieve[0] ^= 0x0004cb6e; // un-cross off 3-37 if regular sieve
}
for(uint32_t pidx = threadIdx.x; pidx < primeListLength; pidx += threads){
// this accepts a list of sieving primes > 37
uint32_t p = d_primeList[pidx];
uint32_t off = p*p/2; // convert offset to align with half sieve
for(; off < sieveBits; off += p) atomicOr(&s_sieve[off >> 5], (1u << (off & 31)));
}
}
/* ##################################################
##### Functions to zero or load SMem sieves ######
##################################################
*/
__device__ void device::sieveInit(uint32_t * s_sieve, uint32_t sieveWords)
{
//#pragma unroll
for(uint16_t i = threadIdx.x; i < sieveWords; i += blockDim.x)
s_sieve[i] ^= s_sieve[i];
}
__device__ void device::sieveInit(uint32_t * s_sieve, uint32_t * d_bigSieve,
uint32_t sieveWords)
{
uint32_t blockStart = sieveWords*blockIdx.x;
for(uint32_t i = threadIdx.x; i < sieveWords; i += threads){
s_sieve[i] = d_bigSieve[blockStart+i];
}
}
/* ##################################
###### Counting functions #######
##################################
Note: some of these (as indicated) destroy the sieve data, and they are differentiated
from those that don't by overloading.
*/
// retains the original sieve data, reduces to block size
__device__ void device::countPrimes(uint32_t * s_sieve, uint16_t * s_counts,
uint32_t sieveWords)
{
uint16_t count = 0;
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads)
{
uint32_t s = ~s_sieve[i];
count += __popc(s);
}
__syncthreads();
s_counts[threadIdx.x] = count;
}
// retains the original sieve data, maintains primes per word
__device__ void device::countPrimesHist(uint32_t * s_sieve, uint32_t * s_counts,
uint32_t sieveWords)
{
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads)
s_counts[i] = __popc(~s_sieve[i]);
__syncthreads();
}
// destroys the original sieve data, maintains primes per word
__device__ void device::countPrimesHist(uint32_t * s_sieve, uint32_t sieveWords,
uint64_t bstart, uint64_t maxPrime)
{
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads)
{
uint32_t s = ~s_sieve[i];
uint16_t count = 0;
for(uint16_t j = 0; j < 32; j++){
if(1 & (s >> j)){
uint64_t p = bstart + 64*i + 2*j + 1;
if(p <= maxPrime) count++; // only count primes less than top
}
}
s_sieve[i] = count;
}
__syncthreads();
}
// destroys original sieve data
__device__ void device::countPrimes(uint32_t * s_sieve, uint32_t sieveWords)
{
uint16_t count = 0;
#pragma unroll
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads)
{
uint32_t s = ~s_sieve[i];
s_sieve[i] ^= s_sieve[i];
count += __popc(s);
}
__syncthreads();
s_sieve[threadIdx.x] = count;
}
__device__ void device::countTopPrimes(uint32_t * s_sieve, uint32_t sieveWords,
uint64_t bstart, uint64_t top)
{
uint32_t count = 0;
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads){
uint32_t s = ~s_sieve[i];
s_sieve[i] ^= s_sieve[i]; // to make a number that can't be the result in
// order to see if it has been modified later
for(uint16_t j = 0; j < 32; j++){
if(1 & (s >> j)){
uint64_t p = bstart + 64*i + 2*j + 1;
if(p <= top) count++; // only count primes less than top
}
}
}
s_sieve[threadIdx.x] = count;
__syncthreads();
}
/*
##########################################################################
###### Functions for moving the count or sieve out of shared memory ######
##########################################################################
*/
__device__ void device::moveCount(uint32_t * s_sieve, volatile uint64_t * d_count, bool isTop)
{
if(threadIdx.x == 0)
{
uint64_t count = 0;
for(uint16_t i=0; i < threads; i++) count += s_sieve[i];
if(isTop) atomicAdd((unsigned long long *)d_count, count);
else atomicAdd((unsigned long long *)d_count, (int) -count);
}
__syncthreads();
}
__device__ void device::moveCountHist(uint32_t * s_sieve, uint32_t * d_histogram)
{
if(threadIdx.x == 0)
{
uint64_t count = 0;
for(uint16_t i=0; i < threads; i++)
count += s_sieve[i];
d_histogram[blockIdx.x] = count;
}
__syncthreads();
}
__device__ void device::makeBigSieve(uint32_t * bigSieve, uint32_t * s_sieve,
uint32_t sieveWords)
{
uint32_t blockStart = sieveWords*blockIdx.x;
for(uint32_t i = threadIdx.x; i < sieveWords; i += threads)
atomicOr(&bigSieve[i+blockStart], s_sieve[i]);
}
/* ##################################################################
###### Functions for generating the list of sieving primes #######
##################################################################
*/
__device__ void device::inclusiveScan(uint32_t * s_array, uint32_t size)
{
uint32_t tidx = threadIdx.x;
uint32_t sum;
for(uint32_t offset = 1; offset <= size/2; offset *= 2){
if(tidx >= offset){
sum = s_array[threadIdx.x] + s_array[threadIdx.x - offset];
}else{sum = s_array[threadIdx.x];}
__syncthreads();
s_array[threadIdx.x] = sum;
__syncthreads();
}
}
// 16 bit data type
__device__ void device::exclusiveScan(uint16_t * s_array, uint32_t size)
{
uint32_t tidx = threadIdx.x;
uint32_t sum;
for(uint32_t offset = 1; offset <= size/2; offset *= 2){
if(tidx >= offset){
sum = s_array[tidx] + s_array[tidx - offset];
}else{sum = s_array[tidx];}
__syncthreads();
s_array[tidx] = sum;
__syncthreads();
}
if(threadIdx.x != 0) sum = s_array[threadIdx.x-1];
else sum = 0;
__syncthreads();
s_array[threadIdx.x] = sum;
}
/*
Exclusive scan function suitable for medium sized lists; part of its operation
is serialized to avoid needing multiple stages whenever the number of items
to be incremented is greater than the number of threads
*/
__device__ void device::exclusiveScanBig(uint32_t * s_array, uint32_t size)
{
uint32_t tidx = threadIdx.x;
uint32_t sum;
for(uint32_t offset = 1; offset <= size/2; offset *= 2){
for(int32_t i = size- 1 - tidx; i >= 0; i -= threads){
if(i >= offset){
sum = s_array[i] + s_array[i - offset];
}else{sum = s_array[i];}
__syncthreads();
s_array[i] = sum;
__syncthreads();
}
}
for(int32_t i = size - 1 - threadIdx.x; i >= 0; i -= threads){
if (i > 0) sum = s_array[i-1];
else sum = 0;
__syncthreads();
s_array[i] = sum;
__syncthreads();
}
}
template <typename T>
__device__ void device::movePrimes(uint32_t * s_sieve, uint16_t * s_counts,
uint32_t sieveWords, T * d_primeOut,
uint32_t * d_histogram, uint64_t bstart, T maxPrime)
{
// this is meant for when words per array == number of threads
uint16_t i = threadIdx.x;
uint16_t c = 0; // used to hold the count
uint32_t s = ~s_sieve[i]; // primes are now represented as 1s
// offset for where each thread should put its first prime
uint32_t idx = d_histogram[blockIdx.x] + s_counts[i];
__syncthreads();
// s_sieve[0] is made ~0 so we can tell if it has been changed
if(threadIdx.x == 0) s_sieve[0] |= ~s_sieve[0];
for(uint16_t j = 0; j < 32; j++){
if(1 & (s >> j)){ // if prime
T p = bstart + 64*i + 2*j + 1; // calculate value
// if value is above threshold, submit and break
if(p > maxPrime) {atomicMin(&s_sieve[0], idx+c); break;}
else d_primeOut[idx+c] = p; // otherwise copy p to the output array
c++; // increment count
}
}
__syncthreads();
if(threadIdx.x == blockDim.x-1){
if(~s_sieve[0] != 0) d_histogram[blockIdx.x] = s_sieve[0];
else d_histogram[blockIdx.x] = idx + c;
}
// this covers up one since the sieve only holds odds
if(threadIdx.x == 1 && bstart == 0) d_primeOut[0] = 2;
}
/*
This is the version of the movePrimes function that is used for generating the original
list of sieving primes to be used by the next round of list generating functions. Unlike
the above versions of the function, it supports array sizes greater than the number of
threads in the block. I could probably get rid of one of the above.
*/
__device__ void device::movePrimesFirst(uint32_t * s_sieve, uint32_t * s_counts,
uint32_t sieveWords, uint32_t * d_primeList,
volatile uint64_t * d_count, uint64_t bstart,
uint32_t maxPrime)
{
// this is for when words per array != number of threads
uint16_t c;
for(uint16_t i = threadIdx.x; i < sieveWords; i += threads){
c = s_counts[i];
uint32_t s = ~s_sieve[i];
__syncthreads();
if(i == 0) s_sieve[0] |= ~s_sieve[0];
for(uint16_t j = 0; j < 32; j++){
if(1 & (s >> j)){
uint32_t p = bstart + 64*i + 2*j + 1;
if(p > maxPrime) atomicMin(&s_sieve[0], c);
else d_primeList[c] = p;
c++;
}
}
}
__syncthreads();
if(threadIdx.x == 0 && ~s_sieve[0] != 0) atomicAdd((unsigned long long *)d_count, s_sieve[0] );
if((threadIdx.x == blockDim.x - 1) && ~s_sieve[0] == 0) atomicAdd((unsigned long long *)d_count, c);
}
|
1008bc1299e4820a5992c873f33dcb61942e4a84.hip | // !!! This is a file automatically generated by hipify!!!
//THIS PROGRAM GENERATES MONTECARLO DATA GIVEN AN AMPLITUDE MODEL
//ROOT
#include <TFile.h>
#include <TTree.h>
#include <iostream>
#include <string>
// GooFit stuff
#include "goofit/Variable.h"
#include "goofit/PDFs/basic/PolynomialPdf.h"
#include "goofit/UnbinnedDataSet.h"
#include "goofit/PDFs/physics/DP4Pdf.h"
#include "goofit/PDFs/physics/TruthResolution_Aux.h"
#include "goofit/PDFs/physics/Tddp4Pdf.h"
#include <thrust/count.h>
#include <fstream>
#include <iomanip>
#include <numeric>
#include <algorithm>
#include <random>
#include <ctime>
#include <functional>
#include <mcbooster/functors/FlagAcceptReject.h>
using namespace std;
// Constants used in more than one PDF component.
const fptype _mD0 = 1.8645;
const fptype piPlusMass = 0.13957018;
const fptype KmMass = .493677;
int main (int argc, char** argv) {
// hipSetDevice(0);
DecayInfo_DP* DK3P_DI = new DecayInfo_DP();
DK3P_DI->meson_radius =5;
DK3P_DI->particle_masses.push_back(_mD0);
DK3P_DI->particle_masses.push_back(piPlusMass);
DK3P_DI->particle_masses.push_back(piPlusMass);
DK3P_DI->particle_masses.push_back(KmMass);
DK3P_DI->particle_masses.push_back(piPlusMass);
Variable* RhoMass = new Variable("rho_mass", 0.77526);
Variable* RhoWidth = new Variable("rho_width", 0.1478);
Variable* K892M = new Variable("K892M", 0.89581);
Variable* K892W = new Variable("K892W", 0.0474);
Variable* f600M = new Variable("f600M", 0.519);
Variable* f600W = new Variable("f600W", 0.454);
Variable* a1M = new Variable("a1M", 1.237);
Variable* a1W = new Variable("a1W", 0.526);
Variable* K1_1270M = new Variable("K1_1270M", 1.28241);
Variable* K1_1270W = new Variable("K1_1270W", 0.06596);
Variable* K0_1430M = new Variable("K0_1430M", 1.425);
Variable* K0_1430W = new Variable("K0_1430W", 0.27);
Variable* K1410M = new Variable("K1410M", 1.414);
Variable* K1410W = new Variable("K1410W", 0.232);
Variable* rho1450M = new Variable("rho1450M", 1.465);
Variable* rho1450W = new Variable("rho1450W", 0.400);
Variable* K1460M = new Variable("K1460M", 1.351);
Variable* K1460W = new Variable("K1460W", 0.281);
Variable* f0_1370M = new Variable("f0_1370M", 1.350);
Variable* f0_1370W = new Variable("f0_1370W", 0.35);
Variable* K1_1400M = new Variable("K1_1400M", 1.403);
Variable* K1_1400W = new Variable("K1_1400W", 0.174);
Variable* K2_1430M = new Variable("K2_1430M", 1.4256);
Variable* K2_1430W = new Variable("K2_1430W", 0.0985);
std::vector<Variable*> LassVars;
LassVars.push_back( new Variable("lass_a",2.07) );
LassVars.push_back( new Variable("lass_r",3.32) );
LassVars.push_back( new Variable("lass_pf",0.0) );
LassVars.push_back( new Variable("lass_pr",0.0) );
LassVars.push_back( new Variable("lass_F",1.0) );
//Spin factors: we have two due to the bose symmetrization of the two pi+
std::vector<SpinFactor*> SF_K892_rho770_S;
SF_K892_rho770_S.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_S, 0, 1, 2, 3) );
SF_K892_rho770_S.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_S, 3, 1, 2, 0) );
//Lineshapes, also for both pi+ configurations
std::vector<Lineshape*> LS_K892_rho770_S;
LS_K892_rho770_S.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K892_rho770_S.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K892_rho770_S.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
LS_K892_rho770_S.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K892_rho770_P;
SF_K892_rho770_P.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_P, 0, 1, 2, 3) );
SF_K892_rho770_P.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 0, 1, 2, 3) );
SF_K892_rho770_P.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_P, 3, 1, 2, 0) );
SF_K892_rho770_P.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K892_rho770_P;
LS_K892_rho770_P.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K892_rho770_P.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K892_rho770_P.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
LS_K892_rho770_P.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K892_rho770_D;
SF_K892_rho770_D.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_D, 0, 1, 2, 3) );
SF_K892_rho770_D.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L2, 0, 1, 2, 3) );
SF_K892_rho770_D.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_D, 3, 1, 2, 0) );
SF_K892_rho770_D.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L2, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K892_rho770_D;
LS_K892_rho770_D.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K892_rho770_D.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K892_rho770_D.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
LS_K892_rho770_D.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1410_rho770_S;
SF_K1410_rho770_S.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_S, 0, 1, 2, 3) );
SF_K1410_rho770_S.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_S, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K1410_rho770_S;
LS_K1410_rho770_S.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K1410_rho770_S.push_back( new Lineshape("K*(1410)", K1410M, K1410W, 1, M_34, LS::BW, FF::BL2) );
LS_K1410_rho770_S.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
LS_K1410_rho770_S.push_back( new Lineshape("K*(1410)", K1410M, K1410W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1410_rho770_P;
SF_K1410_rho770_P.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_P, 0, 1, 2, 3) );
SF_K1410_rho770_P.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 0, 1, 2, 3) );
SF_K1410_rho770_P.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_P, 3, 1, 2, 0) );
SF_K1410_rho770_P.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K1410_rho770_P;
LS_K1410_rho770_P.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K1410_rho770_P.push_back( new Lineshape("K*(1410)", K1410M, K1410W, 1, M_34, LS::BW, FF::BL2) );
LS_K1410_rho770_P.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
LS_K1410_rho770_P.push_back( new Lineshape("K*(1410)", K1410M, K1410W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K892_f0_600;
SF_K892_f0_600.push_back( new SpinFactor("SF", SF_4Body::DtoVS_VtoP1P2_StoP3P4, 2, 3, 0, 1) );
SF_K892_f0_600.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 2, 3, 0, 1) );
SF_K892_f0_600.push_back( new SpinFactor("SF", SF_4Body::DtoVS_VtoP1P2_StoP3P4, 2, 0, 3, 1) );
SF_K892_f0_600.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 2, 0, 3, 1) );
std::vector<Lineshape*> LS_K892_f0_600;
LS_K892_f0_600.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K892_f0_600.push_back( new Lineshape("f600", f600M, f600W, 0, M_12, LS::Bugg3, FF::BL2) );
LS_K892_f0_600.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
LS_K892_f0_600.push_back( new Lineshape("f600", f600M, f600W, 0, M_24, LS::Bugg3, FF::BL2) );
std::vector<SpinFactor*> SF_rho1450_K0_1430;
SF_rho1450_K0_1430.push_back( new SpinFactor("SF", SF_4Body::DtoVS_VtoP1P2_StoP3P4, 0, 1, 2, 3) );
SF_rho1450_K0_1430.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 0, 1, 2, 3) );
SF_rho1450_K0_1430.push_back( new SpinFactor("SF", SF_4Body::DtoVS_VtoP1P2_StoP3P4, 3, 1, 2, 0) );
SF_rho1450_K0_1430.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_rho1450_K0_1430;
LS_rho1450_K0_1430.push_back( new Lineshape("rho(1450)", rho1450M, rho1450W, 1, M_12, LS::BW, FF::BL2) );
LS_rho1450_K0_1430.push_back( new Lineshape("K(0)*(1430)", K0_1430M, K0_1430W, 0, M_34, LS::Lass_M3, FF::BL2, 1.5, LassVars) );
LS_rho1450_K0_1430.push_back( new Lineshape("rho(1450)", rho1450M, rho1450W, 1, M_24, LS::BW, FF::BL2) );
LS_rho1450_K0_1430.push_back( new Lineshape("K(0)*(1430)", K0_1430M, K0_1430W, 0, M_13, LS::Lass_M3, FF::BL2, 1.5, LassVars) );
std::vector<SpinFactor*> SF_K1460_K892;
SF_K1460_K892.push_back( new SpinFactor("SF", SF_4Body::DtoPP1_PtoVP2_VtoP3P4, 0, 1, 2, 3) );
SF_K1460_K892.push_back( new SpinFactor("SF", SF_4Body::DtoPP1_PtoVP2_VtoP3P4, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K1460_K892;
LS_K1460_K892.push_back( new Lineshape("K1460", K1460M, K1460W, 1, M_34_2, LS::BW, FF::BL2) );
LS_K1460_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K1460_K892.push_back( new Lineshape("K1460", K1460M, K1460W, 1, M_13_2, LS::BW, FF::BL2) );
LS_K1460_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1460_f0_1370;
SF_K1460_f0_1370.push_back( new SpinFactor("SF", SF_4Body::DtoPP1_PtoSP2_StoP3P4, 0, 1, 2, 3) );
SF_K1460_f0_1370.push_back( new SpinFactor("SF", SF_4Body::DtoPP1_PtoSP2_StoP3P4, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K1460_f0_1370;
LS_K1460_f0_1370.push_back( new Lineshape("K1460", K1460M, K1460W, 0, M_12_3, LS::BW, FF::BL2) );
LS_K1460_f0_1370.push_back( new Lineshape("f0_1370", f0_1370M, f0_1370W, 0, M_12, LS::BW, FF::BL2) );
LS_K1460_f0_1370.push_back( new Lineshape("K1460", K1460M, K1460W, 0, M_24_3, LS::BW, FF::BL2) );
LS_K1460_f0_1370.push_back( new Lineshape("f0_1370", f0_1370M, f0_1370W, 0, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1_1270_K892;
SF_K1_1270_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 0, 1, 2, 3) );
SF_K1_1270_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
SF_K1_1270_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 3, 1, 2, 0) );
SF_K1_1270_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 0, 3) );
std::vector<Lineshape*> LS_K1_1270_K892;
LS_K1_1270_K892.push_back( new Lineshape("K1_1270", K1_1270M, K1_1270W, 0, M_34_2, LS::BW, FF::BL2) );
LS_K1_1270_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K1_1270_K892.push_back( new Lineshape("K1_1270", K1_1270M, K1_1270W, 0, M_13_2, LS::BW, FF::BL2) );
LS_K1_1270_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1_1270_rho770;
SF_K1_1270_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 3, 2, 0, 1) );
SF_K1_1270_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 2, 3) );
SF_K1_1270_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 0, 2, 3, 1) );
SF_K1_1270_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
std::vector<Lineshape*> LS_K1_1270_rho770;
LS_K1_1270_rho770.push_back( new Lineshape("K1_1270", K1_1270M, K1_1270W, 0, M_12_3, LS::BW, FF::BL2) );
LS_K1_1270_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K1_1270_rho770.push_back( new Lineshape("K1_1270", K1_1270M, K1_1270W, 0, M_24_3, LS::BW, FF::BL2) );
LS_K1_1270_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1_1270_K0_1430;
SF_K1_1270_K0_1430.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoSP2_StoP3P4, 0, 1, 2, 3) );
SF_K1_1270_K0_1430.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
SF_K1_1270_K0_1430.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoSP2_StoP3P4, 3, 1, 2, 0) );
SF_K1_1270_K0_1430.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 2, 3) );
std::vector<Lineshape*> LS_K1_1270_K0_1430;
LS_K1_1270_K0_1430.push_back( new Lineshape("K(1)(1270)bar", K1_1270M, K1_1270W, 1, M_34_2 , LS::BW, FF::BL2) );
LS_K1_1270_K0_1430.push_back( new Lineshape("K(0)*(1430)", K0_1430M, K0_1430W, 0, M_34 , LS::Lass_M3, FF::BL2, 1.5, LassVars) );
LS_K1_1270_K0_1430.push_back( new Lineshape("K(1)(1270)bar2", K1_1270M, K1_1270W, 1, M_13_2 , LS::BW, FF::BL2) );
LS_K1_1270_K0_1430.push_back( new Lineshape("K(0)*1430)", K0_1430M, K0_1430W, 0, M_13 , LS::Lass_M3, FF::BL2, 1.5, LassVars) );
std::vector<SpinFactor*> SF_K1_1400_K892;
SF_K1_1400_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 0, 1, 2, 3) );
SF_K1_1400_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
SF_K1_1400_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 3, 1, 2, 0) );
SF_K1_1400_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 0, 3) );
std::vector<Lineshape*> LS_K1_1400_K892;
LS_K1_1400_K892.push_back( new Lineshape("K1_1400", K1_1400M, K1_1400W, 0, M_34_2, LS::BW, FF::BL2) );
LS_K1_1400_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K1_1400_K892.push_back( new Lineshape("K1_1400", K1_1400M, K1_1400W, 0, M_13_2, LS::BW, FF::BL2) );
LS_K1_1400_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K2_1430_K892;
SF_K2_1430_K892.push_back( new SpinFactor("SF", SF_4Body::DtoTP1_TtoVP2_VtoP3P4, 0, 1, 2, 3) );
SF_K2_1430_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L2, 1, 2, 3, 0) );
SF_K2_1430_K892.push_back( new SpinFactor("SF", SF_4Body::DtoTP1_TtoVP2_VtoP3P4, 3, 1, 2, 0) );
SF_K2_1430_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L2, 1, 2, 0, 3) );
std::vector<Lineshape*> LS_K2_1430_K892;
LS_K2_1430_K892.push_back( new Lineshape("K2_1430", K2_1430M, K2_1430W, 2, M_34_2, LS::BW, FF::BL2) );
LS_K2_1430_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K2_1430_K892.push_back( new Lineshape("K2_1430", K2_1430M, K2_1430W, 2, M_13_2, LS::BW, FF::BL2) );
LS_K2_1430_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K2_1430_rho770;
SF_K2_1430_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoTP1_TtoVP2_VtoP3P4, 3, 2, 0, 1) );
SF_K2_1430_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L2, 0, 1, 2, 3) );
SF_K2_1430_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoTP1_TtoVP2_VtoP3P4, 0, 2, 3, 1) );
SF_K2_1430_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L2, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K2_1430_rho770;
LS_K2_1430_rho770.push_back( new Lineshape("K2_1430", K2_1430M, K2_1430W, 2, M_12_3, LS::BW, FF::BL2) );
LS_K2_1430_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K2_1430_rho770.push_back( new Lineshape("K2_1430", K2_1430M, K2_1430W, 2, M_24_3, LS::BW, FF::BL2) );
LS_K2_1430_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_a1_f0_600;
SF_a1_f0_600.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoSP2_StoP3P4, 2, 3, 0, 1) );
SF_a1_f0_600.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
SF_a1_f0_600.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoSP2_StoP3P4, 2, 0, 3, 1) );
SF_a1_f0_600.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
std::vector<Lineshape*> LS_a1_f0_600;
LS_a1_f0_600.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 1, M_12_4, LS::BW, FF::BL2, 5.71) );
LS_a1_f0_600.push_back( new Lineshape("f600", f600M, f600W, 0, M_12, LS::Bugg3, FF::BL2) );
LS_a1_f0_600.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 1, M_24_1, LS::BW, FF::BL2, 5.71) );
LS_a1_f0_600.push_back( new Lineshape("f600", f600M, f600W, 0, M_24, LS::Bugg3, FF::BL2) );
std::vector<SpinFactor*> SF_a1_rho770;
SF_a1_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 2, 3, 0, 1) );
SF_a1_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
SF_a1_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 2, 0, 3, 1) );
SF_a1_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
std::vector<Lineshape*> LS_a1_rho770;
LS_a1_rho770.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 0, M_12_4, LS::BW, FF::BL2, 5.71) );
LS_a1_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_a1_rho770.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 0, M_24_1, LS::BW, FF::BL2, 5.71) );
LS_a1_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_a1_rho770_D;
SF_a1_rho770_D.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2Dwave_VtoP3P4, 2, 3, 0, 1) );
SF_a1_rho770_D.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
SF_a1_rho770_D.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2Dwave_VtoP3P4, 2, 0, 3, 1) );
SF_a1_rho770_D.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
std::vector<Lineshape*> LS_a1_rho770_D;
LS_a1_rho770_D.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 2, M_12_4, LS::BW, FF::BL2, 5.71) );
LS_a1_rho770_D.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_a1_rho770_D.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 2, M_24_1, LS::BW, FF::BL2, 5.71) );
LS_a1_rho770_D.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_nonRes;
SF_nonRes.push_back( new SpinFactor("SF", SF_4Body::ONE, 2, 3, 0, 1) );
SF_nonRes.push_back( new SpinFactor("SF", SF_4Body::ONE, 2, 0, 3, 1) );
std::vector<Lineshape*> LS_nonRes;
LS_nonRes.push_back( new Lineshape("nonRes", a1M, a1W, 0, M_12, LS::ONE, FF::BL2) );
LS_nonRes.push_back( new Lineshape("nonRes", RhoMass, RhoWidth, 0, M_34, LS::ONE, FF::BL2) );
LS_nonRes.push_back( new Lineshape("nonRes", a1M, a1W, 0, M_12, LS::ONE, FF::BL2) );
LS_nonRes.push_back( new Lineshape("nonRes", RhoMass, RhoWidth, 0, M_34, LS::ONE, FF::BL2) );
std::vector<SpinFactor*> SF_NonResA_K892;
SF_NonResA_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2Dwave_VtoP3P4, 0, 1, 2, 3) );
SF_NonResA_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
SF_NonResA_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2Dwave_VtoP3P4, 3, 1, 2, 0) );
SF_NonResA_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 0, 3) );
std::vector<Lineshape*> LS_NonResA_K892;
LS_NonResA_K892.push_back( new Lineshape("K1_1400", new Variable("NR1",0.0), new Variable("NR2",0.0), 2, M_34_2, LS::nonRes, FF::BL2) );
LS_NonResA_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_NonResA_K892.push_back( new Lineshape("K1_1400", new Variable("NR3",0.0), new Variable("NR4",0.0), 2, M_13_2, LS::nonRes, FF::BL2) );
LS_NonResA_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
// the very last parameter means that we have two permutations, so the first half of the Lineshapes
// and the first half of the spinfactors are amplitude 1 and the rest are amplitude 2.
// This means that for symmetrized amplitudes it is important that the spinfactors and lineshapes are in the "right" order
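// For example, in SF_K892_rho770_S / LS_K892_rho770_S above, the (0,1,2,3) spin factor together with
// the first two Lineshapes (K*(892)bar in M_34, rho(770) in M_12) forms the first permutation, and the
// (3,1,2,0) spin factor with the last two Lineshapes forms the second permutation, i.e. the two
// identical pi+ (indices 0 and 3) exchanged.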
//RS Model
Amplitude* amp_K892_rho770_S = new Amplitude( "K892_rho770_S", new Variable("K892_rho770_S_real", 1.0), new Variable("K892_rho770_S_imag", 0.0), LS_K892_rho770_S, SF_K892_rho770_S, 2);
Amplitude* amp_K892_rho770_P = new Amplitude( "K892_rho770_P", new Variable("K892_rho770_P_real", 1.0), new Variable("K892_rho770_P_imag", 0.0), LS_K892_rho770_P, SF_K892_rho770_P , 2);
Amplitude* amp_K892_rho770_D = new Amplitude( "K892_rho770_D", new Variable("K892_rho770_D_real", 1.0), new Variable("K892_rho770_D_imag",0.0), LS_K892_rho770_D, SF_K892_rho770_D, 2);
Amplitude* amp_K1410_rho770_P = new Amplitude( "K1410_rho770", new Variable("K1410_rho770_P_real", 4.001), new Variable("K1410_rho770_P_imag",-2.620), LS_K1410_rho770_P, SF_K1410_rho770_P, 2);
Amplitude* amp_K892_f0_600 = new Amplitude( "K892_f0600", new Variable("K892_f0600_real", -0.770), new Variable("K892_f0600_imag", -1.530), LS_K892_f0_600, SF_K892_f0_600, 2);
Amplitude* amp_rho1450_K0_1430 = new Amplitude( "rho1450_K0_1430", new Variable("rho1450_K0_1430_real", -0.110), new Variable("rho1450_K0_1430_imag", 1.814), LS_rho1450_K0_1430 , SF_rho1450_K0_1430 , 2);
Amplitude* amp_K1460_K892 = new Amplitude( "K1460_K892", new Variable("K1460_K892_real", -0.696), new Variable("K1460_K892_imag", 0.326), LS_K1460_K892 , SF_K1460_K892 , 2);
Amplitude* amp_K1460_f0_1370 = new Amplitude( "K1460_f0_1370", new Variable("K1460_f0_1370_real", -0.849), new Variable("K1460_f0_1370_imag", 0.972), LS_K1460_f0_1370 , SF_K1460_f0_1370 , 2);
Amplitude* amp_K1_1270_K892 = new Amplitude( "K1_1270_K892", new Variable("K1_1270_K892_real", 0.601), new Variable("K1_1270_K892_imag", -0.182), LS_K1_1270_K892 , SF_K1_1270_K892 , 2);
Amplitude* amp_K1_1270_rho770 = new Amplitude( "K1_1270_rho770", new Variable("K1_1270_rho770_real", -1.523), new Variable("K1_1270_rho770_imag", 1.244), LS_K1_1270_rho770 , SF_K1_1270_rho770 , 2);
Amplitude* amp_K1_1270_K0_1430 = new Amplitude( "K1_1270_K0_1430", new Variable("K1_1270_K0_1430_real", 0.248), new Variable("K1_1270_K0_1430_imag", -0.088), LS_K1_1270_K0_1430 , SF_K1_1270_K0_1430 , 2);
Amplitude* amp_K1_1400_K892 = new Amplitude( "K1_1400_K892", new Variable("K1_1400_K892_real", -0.808), new Variable("K1_1400_K892_imag", -0.358), LS_K1_1400_K892 , SF_K1_1400_K892 , 2);
Amplitude* amp_NonResA_K892 = new Amplitude( "NonResA_K892", new Variable("NonResA_K892_real", -15.322), new Variable("NonResA_K892_imag", -12.089), LS_NonResA_K892, SF_NonResA_K892, 2);
Amplitude* amp_K2_1430_K892 = new Amplitude( "K2_1430_K892", new Variable("K2_1430_K892_real", 17.008), new Variable("K2_1430_K892_imag", -5.014), LS_K2_1430_K892 , SF_K2_1430_K892 , 2);
Amplitude* amp_K2_1430_rho770 = new Amplitude( "K2_1430_rho770", new Variable("K2_1430_rho770_real", 13.039), new Variable("K2_1430_rho770_imag", -1.935), LS_K2_1430_rho770 , SF_K2_1430_rho770 , 2);
Amplitude* amp_a1_rho770 = new Amplitude( "a1_rho770", new Variable("a1_rho770_real", -0.639), new Variable("a1_rho770_imag", -6.801), LS_a1_rho770, SF_a1_rho770, 2);
Amplitude* amp_a1_f0_600 = new Amplitude( "a1_f0_600", new Variable("a1_f0_600_real", -0.062), new Variable("a1_f0_600_imag", 2.872), LS_a1_f0_600 , SF_a1_f0_600 , 2);
Amplitude* amp_a1_rho770_D = new Amplitude( "a1_rho770_D", new Variable("a1_rho770_D_real", -9.465), new Variable("a1_rho770_D_imag", 15.390), LS_a1_rho770_D, SF_a1_rho770_D, 2);
Amplitude* amp_nonRes = new Amplitude( "nonRes", new Variable("nonRes_real", -0.265), new Variable("nonRes_imag", -0.003), LS_nonRes, SF_nonRes, 2);
Amplitude* amp_WS_K892_rho770_S = new Amplitude("WS_K892_rho770_S", new Variable("WS_K892_rho770_S_real", 1.0), new Variable("WS_K892_rho770_S_imag", 0.0), LS_K892_rho770_S, SF_K892_rho770_S, 2);
Amplitude* amp_WS_K892_rho770_P = new Amplitude("WS_K892_rho770_P", new Variable("WS_K892_rho770_P_real", -0.109), new Variable("WS_K892_rho770_P_imag", 1.653), LS_K892_rho770_P, SF_K892_rho770_P , 2);
Amplitude* amp_WS_K892_rho770_D = new Amplitude("WS_K892_rho770_D", new Variable("WS_K892_rho770_D_real", 25.463), new Variable("WS_K892_rho770_D_imag", 2.662), LS_K892_rho770_D, SF_K892_rho770_D, 2);
Amplitude* amp_WS_rho1450_K0_1430 = new Amplitude("WS_rho1450_K0_1430", new Variable("WS_rho1450_K0_1430_real", 2.353), new Variable("WS_rho1450_K0_1430_imag", -0.234), LS_rho1450_K0_1430 , SF_rho1450_K0_1430 , 2);
Amplitude* amp_WS_K1_1270_K892 = new Amplitude("WS_K1_1270_K892", new Variable("WS_K1_1270_K892_real", -0.035), new Variable("WS_K1_1270_K892_imag", -1.405), LS_K1_1270_K892 , SF_K1_1270_K892 , 2);
Amplitude* amp_WS_K1_1270_rho770 = new Amplitude("WS_K1_1270_rho770", new Variable("WS_K1_1270_rho770_real", 2.42), new Variable("WS_K1_1270_rho770_imag", 2.471), LS_K1_1270_rho770 , SF_K1_1270_rho770 , 2);
Amplitude* amp_WS_K1_1270_K0_1430 = new Amplitude("WS_K1_1270_K0_1430", new Variable("WS_K1_1270_K0_1430_real", -1.990), new Variable("WS_K1_1270_K0_1430_imag", -2.105), LS_K1_1270_K0_1430 , SF_K1_1270_K0_1430 , 2);
Amplitude* amp_WS_K1_1400_K892 = new Amplitude("WS_K1_1400_K892", new Variable("WS_K1_1400_K892_real", -3.347), new Variable("WS_K1_1400_K892_imag", -2.237), LS_K1_1400_K892 , SF_K1_1400_K892 , 2);
Amplitude* amp_WS_nonRes = new Amplitude("WS_nonRes", new Variable("WS_nonRes_real", -0.456), new Variable("WS_nonRes_imag", -0.041), LS_nonRes, SF_nonRes, 2);
//DK3P_DI->amplitudes_B.push_back(amp_K892_rho770_S);
DK3P_DI->amplitudes_B.push_back(amp_K892_rho770_P);
DK3P_DI->amplitudes_B.push_back(amp_K892_rho770_D);
//DK3P_DI->amplitudes_B.push_back(amp_K1410_rho770_P);
//DK3P_DI->amplitudes_B.push_back(amp_K892_f0_600);
//DK3P_DI->amplitudes_B.push_back(amp_rho1450_K0_1430);
//DK3P_DI->amplitudes_B.push_back(amp_K1460_K892);
//DK3P_DI->amplitudes_B.push_back(amp_K1460_f0_1370);
//DK3P_DI->amplitudes_B.push_back(amp_K1_1270_K892);
//DK3P_DI->amplitudes_B.push_back(amp_K1_1270_rho770);
//DK3P_DI->amplitudes_B.push_back(amp_K1_1270_K0_1430);
//DK3P_DI->amplitudes_B.push_back(amp_K1_1400_K892);
//DK3P_DI->amplitudes_B.push_back(amp_NonResA_K892);
//DK3P_DI->amplitudes_B.push_back(amp_K2_1430_K892);
//DK3P_DI->amplitudes_B.push_back(amp_K2_1430_rho770);
//DK3P_DI->amplitudes_B.push_back(amp_a1_rho770);
//DK3P_DI->amplitudes_B.push_back(amp_a1_f0_600);
//DK3P_DI->amplitudes_B.push_back(amp_a1_rho770_D);
//DK3P_DI->amplitudes_B.push_back(amp_nonRes);
//DK3P_DI->amplitudes.push_back(amp_WS_K892_rho770_S);
//DK3P_DI->amplitudes.push_back(amp_WS_K892_rho770_P);
//DK3P_DI->amplitudes.push_back(amp_WS_K892_rho770_D);
//DK3P_DI->amplitudes.push_back(amp_WS_rho1450_K0_1430);
//DK3P_DI->amplitudes.push_back(amp_WS_K1_1270_K892);
//DK3P_DI->amplitudes.push_back(amp_WS_K1_1270_rho770);
//DK3P_DI->amplitudes.push_back(amp_WS_K1_1270_K0_1430);
//DK3P_DI->amplitudes.push_back(amp_WS_K1_1400_K892);
//DK3P_DI->amplitudes.push_back(amp_WS_nonRes);
DK3P_DI->_tau = new Variable("tau", 0.4101);
DK3P_DI->_xmixing = new Variable("xmixing", 0.0049);
DK3P_DI->_ymixing = new Variable("ymixing", 0.0061);
// DK3P_DI->_xmixing = new Variable("xmixing", 0, 0.00001, -.15, .15);
// DK3P_DI->_ymixing = new Variable("ymixing", 0, 0.00001, -.15, .15);
DK3P_DI->_SqWStoRSrate = new Variable("SqWStoRSrate", 1.0/sqrt(300.0));
Variable* m12 = new Variable("m12", 0, 3);
Variable* m34 = new Variable("m34", 0, 3);
Variable* cos12 = new Variable("cos12", -1, 1);
Variable* cos34 = new Variable("cos34", -1, 1);
Variable* phi = new Variable("phi", -3.5, 3.5);
Variable* eventNumber = new Variable("eventNumber", 0, INT_MAX);
Variable* dtime = new Variable("dtime", 0, 10);
Variable* sigmat = new Variable("sigmat",-3,3);
Variable* constantOne = new Variable("constantOne", 1);
Variable* constantZero = new Variable("constantZero", 0);
std::vector<Variable*> vars;
vars.push_back(m12);
vars.push_back(m34);
vars.push_back(cos12);
vars.push_back(cos34);
vars.push_back(phi);
vars.push_back(eventNumber);
vars.push_back(dtime);
vars.push_back(sigmat);
UnbinnedDataSet currData(vars);
*DK3P_DI->_xmixing = strtof(argv[5], NULL);
*DK3P_DI->_ymixing = strtof(argv[6], NULL);
vector<Variable*> observables;
vector<Variable*> coefficients;
vector<Variable*> offsets;
observables.push_back(m12);
observables.push_back(m34);
observables.push_back(cos12);
observables.push_back(cos34);
observables.push_back(phi);
observables.push_back(eventNumber);
observables.push_back(dtime);
observables.push_back(sigmat);
offsets.push_back(constantZero);
offsets.push_back(constantZero);
coefficients.push_back(constantOne);
TruthResolution* dat = new TruthResolution();
PolynomialPdf* eff = new PolynomialPdf("constantEff", observables, coefficients, offsets, 0);
TDDP4* dp = new TDDP4("test_TD", observables, DK3P_DI, dat, eff, 0, 1);
//dp->setGenDecayTimeLimit(0,3.5); // this corresponds to roughly 97% of the exponential. So this should be ok. And speeds up Generation significantly compared to [0,5]
TFile *file = new TFile( argv[4] , "RECREATE");
TTree *tree = new TTree("events", "events");
double tm12_2,tm34_2,tc12_2,tc34_2,tphi_2,tdtime_2;
tree->Branch("m12", &tm12_2, "m12/D");
tree->Branch("m34", &tm34_2, "m34/D");
tree->Branch("c12", &tc12_2, "c12/D");
tree->Branch("c34", &tc34_2, "c34/D");
tree->Branch("phi", &tphi_2, "phi/D");
tree->Branch("dtime", &tdtime_2, "dtime/D");
mcbooster::FlagAcceptReject(0,0);
int generatedEvents = 0;
int RunNum = 0;
int BatchSize = strtoul(argv[1], NULL,0);
unsigned int offi = strtoul(argv[3], NULL,0);
unsigned int genEvts =strtoul(argv[2], NULL,0);
double wmax = 0;
mcbooster::FlagAcceptReject FlagIt = mcbooster::FlagAcceptReject(0.1,5);
while(generatedEvents < genEvts )
{
unsigned int keptEvts = 0;
dp->setGenerationOffset(offi);
auto tuple = dp->GenerateSig(BatchSize);
auto particles = std::get<0>(tuple);
auto variables = std::get<1>(tuple);
auto weights = std::get<2>(tuple);
auto flags = std::get<3>(tuple);
// int accepted = thrust::count_if(flags.begin(), flags.end(), thrust::identity<bool>());
++RunNum;
// generatedEvents += accepted;
for (int i = 0; i < weights.size(); ++i)
{
if (wmax<weights[i]) wmax = weights[i];
if (generatedEvents < genEvts && FlagIt(i,weights[i])){
++generatedEvents;
++keptEvts;
// printf("PF %i: %s %.5g %.5g %.5g %.5g %.5g %.5g\n",i, (bool)flags[i] ? "true" : "false", weights[i], (*(variables[0]))[i], (*(variables[1]))[i], (*(variables[2]))[i], (*(variables[3]))[i], (*(variables[4]))[i]);
tm12_2 = (*(variables[0]))[i];
tm34_2 = (*(variables[1]))[i];
tc12_2 = (*(variables[2]))[i];
tc34_2 = (*(variables[3]))[i];
tphi_2 = (*(variables[4]))[i];
tdtime_2 = (*(variables[5]))[i];
tree->Fill();
// printf("Buffer %i: %.5g %.5g %.5g %.5g %.5g %.5g \n",i, (*myweights)[i],(*Buffer_m12)[i], (*Buffer_m34)[i], (*Buffer_c12)[i], (*Buffer_c34)[i], (*Buffer_phi)[i], (*Buffer_dt)[i]);
}
}
fprintf(stderr,"Run # %i: x=%.6g y=%.6g Using accept-reject method leaves you with %i out of %i events. %.4g %% of Total offset: %u\n",RunNum, *DK3P_DI->_xmixing, *DK3P_DI->_ymixing, keptEvts, BatchSize, generatedEvents*100.0/genEvts, offi);
offi += BatchSize;
delete variables[0];
delete variables[1];
delete variables[2];
delete variables[3];
delete variables[4];
delete variables[5];
delete particles[0];
delete particles[1];
delete particles[2];
delete particles[3];
}
// printf("start\n");
// int i = 0;
// printf("Buffer %i: %.5g %.5g %.5g %.5g %.5g %.5g \n",i, (*myweights)[i],(*Buffer_m12)[i], (*Buffer_m34)[i], (*Buffer_c12)[i], (*Buffer_c34)[i], (*Buffer_phi)[i], (*Buffer_dt)[i]);
// printf("start2\n");
std::ofstream out;
string outname ="Max_observed_weights.txt";
out.open(outname.c_str(), std::ios::app);
out.precision(10);
out << wmax <<endl;
tree->Write();
file->Close();
// printf("overall wmax %f, keept %u evts, reweight ratio %.5g\n",wmax, keptEvts, (double)keptEvts/genEvts );
printf("%i\n",offi);
return 0;
}
| 1008bc1299e4820a5992c873f33dcb61942e4a84.cu | //THIS PROGRAM GENERATES MONTECARLO DATA GIVEN AN AMPLITUDE MODEL
//ROOT
#include <TFile.h>
#include <TTree.h>
#include <iostream>
#include <string>
// GooFit stuff
#include "goofit/Variable.h"
#include "goofit/PDFs/basic/PolynomialPdf.h"
#include "goofit/UnbinnedDataSet.h"
#include "goofit/PDFs/physics/DP4Pdf.h"
#include "goofit/PDFs/physics/TruthResolution_Aux.h"
#include "goofit/PDFs/physics/Tddp4Pdf.h"
#include <thrust/count.h>
#include <fstream>
#include <iomanip>
#include <numeric>
#include <algorithm>
#include <random>
#include <ctime>
#include <functional>
#include <mcbooster/functors/FlagAcceptReject.h>
using namespace std;
// Constants used in more than one PDF component.
const fptype _mD0 = 1.8645;
const fptype piPlusMass = 0.13957018;
const fptype KmMass = .493677;
int main (int argc, char** argv) {
// cudaSetDevice(0);
DecayInfo_DP* DK3P_DI = new DecayInfo_DP();
DK3P_DI->meson_radius =5;
DK3P_DI->particle_masses.push_back(_mD0);
DK3P_DI->particle_masses.push_back(piPlusMass);
DK3P_DI->particle_masses.push_back(piPlusMass);
DK3P_DI->particle_masses.push_back(KmMass);
DK3P_DI->particle_masses.push_back(piPlusMass);
Variable* RhoMass = new Variable("rho_mass", 0.77526);
Variable* RhoWidth = new Variable("rho_width", 0.1478);
Variable* K892M = new Variable("K892M", 0.89581);
Variable* K892W = new Variable("K892W", 0.0474);
Variable* f600M = new Variable("f600M", 0.519);
Variable* f600W = new Variable("f600W", 0.454);
Variable* a1M = new Variable("a1M", 1.237);
Variable* a1W = new Variable("a1W", 0.526);
Variable* K1_1270M = new Variable("K1_1270M", 1.28241);
Variable* K1_1270W = new Variable("K1_1270W", 0.06596);
Variable* K0_1430M = new Variable("K0_1430M", 1.425);
Variable* K0_1430W = new Variable("K0_1430W", 0.27);
Variable* K1410M = new Variable("K1410M", 1.414);
Variable* K1410W = new Variable("K1410W", 0.232);
Variable* rho1450M = new Variable("rho1450M", 1.465);
Variable* rho1450W = new Variable("rho1450W", 0.400);
Variable* K1460M = new Variable("K1460M", 1.351);
Variable* K1460W = new Variable("K1460W", 0.281);
Variable* f0_1370M = new Variable("f0_1370M", 1.350);
Variable* f0_1370W = new Variable("f0_1370W", 0.35);
Variable* K1_1400M = new Variable("K1_1400M", 1.403);
Variable* K1_1400W = new Variable("K1_1400W", 0.174);
Variable* K2_1430M = new Variable("K2_1430M", 1.4256);
Variable* K2_1430W = new Variable("K2_1430W", 0.0985);
std::vector<Variable*> LassVars;
LassVars.push_back( new Variable("lass_a",2.07) );
LassVars.push_back( new Variable("lass_r",3.32) );
LassVars.push_back( new Variable("lass_pf",0.0) );
LassVars.push_back( new Variable("lass_pr",0.0) );
LassVars.push_back( new Variable("lass_F",1.0) );
//Spin factors: we have two due to the bose symmetrization of the two pi+
std::vector<SpinFactor*> SF_K892_rho770_S;
SF_K892_rho770_S.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_S, 0, 1, 2, 3) );
SF_K892_rho770_S.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_S, 3, 1, 2, 0) );
//Lineshapes, also for both pi+ configurations
std::vector<Lineshape*> LS_K892_rho770_S;
LS_K892_rho770_S.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K892_rho770_S.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K892_rho770_S.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
LS_K892_rho770_S.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K892_rho770_P;
SF_K892_rho770_P.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_P, 0, 1, 2, 3) );
SF_K892_rho770_P.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 0, 1, 2, 3) );
SF_K892_rho770_P.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_P, 3, 1, 2, 0) );
SF_K892_rho770_P.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K892_rho770_P;
LS_K892_rho770_P.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K892_rho770_P.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K892_rho770_P.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
LS_K892_rho770_P.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K892_rho770_D;
SF_K892_rho770_D.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_D, 0, 1, 2, 3) );
SF_K892_rho770_D.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L2, 0, 1, 2, 3) );
SF_K892_rho770_D.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_D, 3, 1, 2, 0) );
SF_K892_rho770_D.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L2, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K892_rho770_D;
LS_K892_rho770_D.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K892_rho770_D.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K892_rho770_D.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
LS_K892_rho770_D.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1410_rho770_S;
SF_K1410_rho770_S.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_S, 0, 1, 2, 3) );
SF_K1410_rho770_S.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_S, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K1410_rho770_S;
LS_K1410_rho770_S.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K1410_rho770_S.push_back( new Lineshape("K*(1410)", K1410M, K1410W, 1, M_34, LS::BW, FF::BL2) );
LS_K1410_rho770_S.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
LS_K1410_rho770_S.push_back( new Lineshape("K*(1410)", K1410M, K1410W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1410_rho770_P;
SF_K1410_rho770_P.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_P, 0, 1, 2, 3) );
SF_K1410_rho770_P.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 0, 1, 2, 3) );
SF_K1410_rho770_P.push_back( new SpinFactor("SF", SF_4Body::DtoV1V2_V1toP1P2_V2toP3P4_P, 3, 1, 2, 0) );
SF_K1410_rho770_P.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K1410_rho770_P;
LS_K1410_rho770_P.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K1410_rho770_P.push_back( new Lineshape("K*(1410)", K1410M, K1410W, 1, M_34, LS::BW, FF::BL2) );
LS_K1410_rho770_P.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
LS_K1410_rho770_P.push_back( new Lineshape("K*(1410)", K1410M, K1410W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K892_f0_600;
SF_K892_f0_600.push_back( new SpinFactor("SF", SF_4Body::DtoVS_VtoP1P2_StoP3P4, 2, 3, 0, 1) );
SF_K892_f0_600.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 2, 3, 0, 1) );
SF_K892_f0_600.push_back( new SpinFactor("SF", SF_4Body::DtoVS_VtoP1P2_StoP3P4, 2, 0, 3, 1) );
SF_K892_f0_600.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 2, 0, 3, 1) );
std::vector<Lineshape*> LS_K892_f0_600;
LS_K892_f0_600.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K892_f0_600.push_back( new Lineshape("f600", f600M, f600W, 0, M_12, LS::Bugg3, FF::BL2) );
LS_K892_f0_600.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
LS_K892_f0_600.push_back( new Lineshape("f600", f600M, f600W, 0, M_24, LS::Bugg3, FF::BL2) );
std::vector<SpinFactor*> SF_rho1450_K0_1430;
SF_rho1450_K0_1430.push_back( new SpinFactor("SF", SF_4Body::DtoVS_VtoP1P2_StoP3P4, 0, 1, 2, 3) );
SF_rho1450_K0_1430.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 0, 1, 2, 3) );
SF_rho1450_K0_1430.push_back( new SpinFactor("SF", SF_4Body::DtoVS_VtoP1P2_StoP3P4, 3, 1, 2, 0) );
SF_rho1450_K0_1430.push_back( new SpinFactor("SF", SF_4Body::FF_12_34_L1, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_rho1450_K0_1430;
LS_rho1450_K0_1430.push_back( new Lineshape("rho(1450)", rho1450M, rho1450W, 1, M_12, LS::BW, FF::BL2) );
LS_rho1450_K0_1430.push_back( new Lineshape("K(0)*(1430)", K0_1430M, K0_1430W, 0, M_34, LS::Lass_M3, FF::BL2, 1.5, LassVars) );
LS_rho1450_K0_1430.push_back( new Lineshape("rho(1450)", rho1450M, rho1450W, 1, M_24, LS::BW, FF::BL2) );
LS_rho1450_K0_1430.push_back( new Lineshape("K(0)*(1430)", K0_1430M, K0_1430W, 0, M_13, LS::Lass_M3, FF::BL2, 1.5, LassVars) );
std::vector<SpinFactor*> SF_K1460_K892;
SF_K1460_K892.push_back( new SpinFactor("SF", SF_4Body::DtoPP1_PtoVP2_VtoP3P4, 0, 1, 2, 3) );
SF_K1460_K892.push_back( new SpinFactor("SF", SF_4Body::DtoPP1_PtoVP2_VtoP3P4, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K1460_K892;
LS_K1460_K892.push_back( new Lineshape("K1460", K1460M, K1460W, 1, M_34_2, LS::BW, FF::BL2) );
LS_K1460_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K1460_K892.push_back( new Lineshape("K1460", K1460M, K1460W, 1, M_13_2, LS::BW, FF::BL2) );
LS_K1460_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1460_f0_1370;
SF_K1460_f0_1370.push_back( new SpinFactor("SF", SF_4Body::DtoPP1_PtoSP2_StoP3P4, 0, 1, 2, 3) );
SF_K1460_f0_1370.push_back( new SpinFactor("SF", SF_4Body::DtoPP1_PtoSP2_StoP3P4, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K1460_f0_1370;
LS_K1460_f0_1370.push_back( new Lineshape("K1460", K1460M, K1460W, 0, M_12_3, LS::BW, FF::BL2) );
LS_K1460_f0_1370.push_back( new Lineshape("f0_1370", f0_1370M, f0_1370W, 0, M_12, LS::BW, FF::BL2) );
LS_K1460_f0_1370.push_back( new Lineshape("K1460", K1460M, K1460W, 0, M_24_3, LS::BW, FF::BL2) );
LS_K1460_f0_1370.push_back( new Lineshape("f0_1370", f0_1370M, f0_1370W, 0, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1_1270_K892;
SF_K1_1270_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 0, 1, 2, 3) );
SF_K1_1270_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
SF_K1_1270_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 3, 1, 2, 0) );
SF_K1_1270_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 0, 3) );
std::vector<Lineshape*> LS_K1_1270_K892;
LS_K1_1270_K892.push_back( new Lineshape("K1_1270", K1_1270M, K1_1270W, 0, M_34_2, LS::BW, FF::BL2) );
LS_K1_1270_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K1_1270_K892.push_back( new Lineshape("K1_1270", K1_1270M, K1_1270W, 0, M_13_2, LS::BW, FF::BL2) );
LS_K1_1270_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1_1270_rho770;
SF_K1_1270_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 3, 2, 0, 1) );
SF_K1_1270_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 2, 3) );
SF_K1_1270_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 0, 2, 3, 1) );
SF_K1_1270_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
std::vector<Lineshape*> LS_K1_1270_rho770;
LS_K1_1270_rho770.push_back( new Lineshape("K1_1270", K1_1270M, K1_1270W, 0, M_12_3, LS::BW, FF::BL2) );
LS_K1_1270_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K1_1270_rho770.push_back( new Lineshape("K1_1270", K1_1270M, K1_1270W, 0, M_24_3, LS::BW, FF::BL2) );
LS_K1_1270_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K1_1270_K0_1430;
SF_K1_1270_K0_1430.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoSP2_StoP3P4, 0, 1, 2, 3) );
SF_K1_1270_K0_1430.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
SF_K1_1270_K0_1430.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoSP2_StoP3P4, 3, 1, 2, 0) );
SF_K1_1270_K0_1430.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 2, 3) );
std::vector<Lineshape*> LS_K1_1270_K0_1430;
LS_K1_1270_K0_1430.push_back( new Lineshape("K(1)(1270)bar", K1_1270M, K1_1270W, 1, M_34_2 , LS::BW, FF::BL2) );
LS_K1_1270_K0_1430.push_back( new Lineshape("K(0)*(1430)", K0_1430M, K0_1430W, 0, M_34 , LS::Lass_M3, FF::BL2, 1.5, LassVars) );
LS_K1_1270_K0_1430.push_back( new Lineshape("K(1)(1270)bar2", K1_1270M, K1_1270W, 1, M_13_2 , LS::BW, FF::BL2) );
LS_K1_1270_K0_1430.push_back( new Lineshape("K(0)*1430)", K0_1430M, K0_1430W, 0, M_13 , LS::Lass_M3, FF::BL2, 1.5, LassVars) );
std::vector<SpinFactor*> SF_K1_1400_K892;
SF_K1_1400_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 0, 1, 2, 3) );
SF_K1_1400_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
SF_K1_1400_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 3, 1, 2, 0) );
SF_K1_1400_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 0, 3) );
std::vector<Lineshape*> LS_K1_1400_K892;
LS_K1_1400_K892.push_back( new Lineshape("K1_1400", K1_1400M, K1_1400W, 0, M_34_2, LS::BW, FF::BL2) );
LS_K1_1400_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K1_1400_K892.push_back( new Lineshape("K1_1400", K1_1400M, K1_1400W, 0, M_13_2, LS::BW, FF::BL2) );
LS_K1_1400_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K2_1430_K892;
SF_K2_1430_K892.push_back( new SpinFactor("SF", SF_4Body::DtoTP1_TtoVP2_VtoP3P4, 0, 1, 2, 3) );
SF_K2_1430_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L2, 1, 2, 3, 0) );
SF_K2_1430_K892.push_back( new SpinFactor("SF", SF_4Body::DtoTP1_TtoVP2_VtoP3P4, 3, 1, 2, 0) );
SF_K2_1430_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L2, 1, 2, 0, 3) );
std::vector<Lineshape*> LS_K2_1430_K892;
LS_K2_1430_K892.push_back( new Lineshape("K2_1430", K2_1430M, K2_1430W, 2, M_34_2, LS::BW, FF::BL2) );
LS_K2_1430_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_K2_1430_K892.push_back( new Lineshape("K2_1430", K2_1430M, K2_1430W, 2, M_13_2, LS::BW, FF::BL2) );
LS_K2_1430_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_K2_1430_rho770;
SF_K2_1430_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoTP1_TtoVP2_VtoP3P4, 3, 2, 0, 1) );
SF_K2_1430_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L2, 0, 1, 2, 3) );
SF_K2_1430_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoTP1_TtoVP2_VtoP3P4, 0, 2, 3, 1) );
SF_K2_1430_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L2, 3, 1, 2, 0) );
std::vector<Lineshape*> LS_K2_1430_rho770;
LS_K2_1430_rho770.push_back( new Lineshape("K2_1430", K2_1430M, K2_1430W, 2, M_12_3, LS::BW, FF::BL2) );
LS_K2_1430_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_K2_1430_rho770.push_back( new Lineshape("K2_1430", K2_1430M, K2_1430W, 2, M_24_3, LS::BW, FF::BL2) );
LS_K2_1430_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_a1_f0_600;
SF_a1_f0_600.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoSP2_StoP3P4, 2, 3, 0, 1) );
SF_a1_f0_600.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
SF_a1_f0_600.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoSP2_StoP3P4, 2, 0, 3, 1) );
SF_a1_f0_600.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
std::vector<Lineshape*> LS_a1_f0_600;
LS_a1_f0_600.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 1, M_12_4, LS::BW, FF::BL2, 5.71) );
LS_a1_f0_600.push_back( new Lineshape("f600", f600M, f600W, 0, M_12, LS::Bugg3, FF::BL2) );
LS_a1_f0_600.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 1, M_24_1, LS::BW, FF::BL2, 5.71) );
LS_a1_f0_600.push_back( new Lineshape("f600", f600M, f600W, 0, M_24, LS::Bugg3, FF::BL2) );
std::vector<SpinFactor*> SF_a1_rho770;
SF_a1_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 2, 3, 0, 1) );
SF_a1_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
SF_a1_rho770.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2_VtoP3P4, 2, 0, 3, 1) );
SF_a1_rho770.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
std::vector<Lineshape*> LS_a1_rho770;
LS_a1_rho770.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 0, M_12_4, LS::BW, FF::BL2, 5.71) );
LS_a1_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_a1_rho770.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 0, M_24_1, LS::BW, FF::BL2, 5.71) );
LS_a1_rho770.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_a1_rho770_D;
SF_a1_rho770_D.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2Dwave_VtoP3P4, 2, 3, 0, 1) );
SF_a1_rho770_D.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
SF_a1_rho770_D.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2Dwave_VtoP3P4, 2, 0, 3, 1) );
SF_a1_rho770_D.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 0, 1, 3, 2) );
std::vector<Lineshape*> LS_a1_rho770_D;
LS_a1_rho770_D.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 2, M_12_4, LS::BW, FF::BL2, 5.71) );
LS_a1_rho770_D.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_12, LS::BW, FF::BL2) );
LS_a1_rho770_D.push_back( new Lineshape("a(1)(1260)+", a1M, a1W, 2, M_24_1, LS::BW, FF::BL2, 5.71) );
LS_a1_rho770_D.push_back( new Lineshape("rho(770)", RhoMass, RhoWidth, 1, M_24, LS::BW, FF::BL2) );
std::vector<SpinFactor*> SF_nonRes;
SF_nonRes.push_back( new SpinFactor("SF", SF_4Body::ONE, 2, 3, 0, 1) );
SF_nonRes.push_back( new SpinFactor("SF", SF_4Body::ONE, 2, 0, 3, 1) );
std::vector<Lineshape*> LS_nonRes;
LS_nonRes.push_back( new Lineshape("nonRes", a1M, a1W, 0, M_12, LS::ONE, FF::BL2) );
LS_nonRes.push_back( new Lineshape("nonRes", RhoMass, RhoWidth, 0, M_34, LS::ONE, FF::BL2) );
LS_nonRes.push_back( new Lineshape("nonRes", a1M, a1W, 0, M_12, LS::ONE, FF::BL2) );
LS_nonRes.push_back( new Lineshape("nonRes", RhoMass, RhoWidth, 0, M_34, LS::ONE, FF::BL2) );
std::vector<SpinFactor*> SF_NonResA_K892;
SF_NonResA_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2Dwave_VtoP3P4, 0, 1, 2, 3) );
SF_NonResA_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 3, 0) );
SF_NonResA_K892.push_back( new SpinFactor("SF", SF_4Body::DtoAP1_AtoVP2Dwave_VtoP3P4, 3, 1, 2, 0) );
SF_NonResA_K892.push_back( new SpinFactor("SF", SF_4Body::FF_123_4_L1, 1, 2, 0, 3) );
std::vector<Lineshape*> LS_NonResA_K892;
LS_NonResA_K892.push_back( new Lineshape("K1_1400", new Variable("NR1",0.0), new Variable("NR2",0.0), 2, M_34_2, LS::nonRes, FF::BL2) );
LS_NonResA_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_34, LS::BW, FF::BL2) );
LS_NonResA_K892.push_back( new Lineshape("K1_1400", new Variable("NR3",0.0), new Variable("NR4",0.0), 2, M_13_2, LS::nonRes, FF::BL2) );
LS_NonResA_K892.push_back( new Lineshape("K*(892)bar", K892M, K892W, 1, M_13, LS::BW, FF::BL2) );
// The very last parameter means that we have two permutations, so the first half of the Lineshapes
// and the first half of the SpinFactors belong to amplitude 1 and the rest to amplitude 2.
// This means that for symmetrized amplitudes it is important that the SpinFactors and Lineshapes are in the "right" order.
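// As an illustration: in LS_K892_rho770_S above, the first two Lineshapes (K*(892)bar in M_34 and
// rho(770) in M_12) pair with the first SpinFactor to form permutation 1, while the last two
// (K*(892)bar in M_13 and rho(770) in M_24) pair with the second SpinFactor to form permutation 2.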
//RS Model
Amplitude* amp_K892_rho770_S = new Amplitude( "K892_rho770_S", new Variable("K892_rho770_S_real", 1.0), new Variable("K892_rho770_S_imag", 0.0), LS_K892_rho770_S, SF_K892_rho770_S, 2);
Amplitude* amp_K892_rho770_P = new Amplitude( "K892_rho770_P", new Variable("K892_rho770_P_real", 1.0), new Variable("K892_rho770_P_imag", 0.0), LS_K892_rho770_P, SF_K892_rho770_P , 2);
Amplitude* amp_K892_rho770_D = new Amplitude( "K892_rho770_D", new Variable("K892_rho770_D_real", 1.0), new Variable("K892_rho770_D_imag",0.0), LS_K892_rho770_D, SF_K892_rho770_D, 2);
Amplitude* amp_K1410_rho770_P = new Amplitude( "K1410_rho770", new Variable("K1410_rho770_P_real", 4.001), new Variable("K1410_rho770_P_imag",-2.620), LS_K1410_rho770_P, SF_K1410_rho770_P, 2);
Amplitude* amp_K892_f0_600 = new Amplitude( "K892_f0600", new Variable("K892_f0600_real", -0.770), new Variable("K892_f0600_imag", -1.530), LS_K892_f0_600, SF_K892_f0_600, 2);
Amplitude* amp_rho1450_K0_1430 = new Amplitude( "rho1450_K0_1430", new Variable("rho1450_K0_1430_real", -0.110), new Variable("rho1450_K0_1430_imag", 1.814), LS_rho1450_K0_1430 , SF_rho1450_K0_1430 , 2);
Amplitude* amp_K1460_K892 = new Amplitude( "K1460_K892", new Variable("K1460_K892_real", -0.696), new Variable("K1460_K892_imag", 0.326), LS_K1460_K892 , SF_K1460_K892 , 2);
Amplitude* amp_K1460_f0_1370 = new Amplitude( "K1460_f0_1370", new Variable("K1460_f0_1370_real", -0.849), new Variable("K1460_f0_1370_imag", 0.972), LS_K1460_f0_1370 , SF_K1460_f0_1370 , 2);
Amplitude* amp_K1_1270_K892 = new Amplitude( "K1_1270_K892", new Variable("K1_1270_K892_real", 0.601), new Variable("K1_1270_K892_imag", -0.182), LS_K1_1270_K892 , SF_K1_1270_K892 , 2);
Amplitude* amp_K1_1270_rho770 = new Amplitude( "K1_1270_rho770", new Variable("K1_1270_rho770_real", -1.523), new Variable("K1_1270_rho770_imag", 1.244), LS_K1_1270_rho770 , SF_K1_1270_rho770 , 2);
Amplitude* amp_K1_1270_K0_1430 = new Amplitude( "K1_1270_K0_1430", new Variable("K1_1270_K0_1430_real", 0.248), new Variable("K1_1270_K0_1430_imag", -0.088), LS_K1_1270_K0_1430 , SF_K1_1270_K0_1430 , 2);
Amplitude* amp_K1_1400_K892 = new Amplitude( "K1_1400_K892", new Variable("K1_1400_K892_real", -0.808), new Variable("K1_1400_K892_imag", -0.358), LS_K1_1400_K892 , SF_K1_1400_K892 , 2);
Amplitude* amp_NonResA_K892 = new Amplitude( "NonResA_K892", new Variable("NonResA_K892_real", -15.322), new Variable("NonResA_K892_imag", -12.089), LS_NonResA_K892, SF_NonResA_K892, 2);
Amplitude* amp_K2_1430_K892 = new Amplitude( "K2_1430_K892", new Variable("K2_1430_K892_real", 17.008), new Variable("K2_1430_K892_imag", -5.014), LS_K2_1430_K892 , SF_K2_1430_K892 , 2);
Amplitude* amp_K2_1430_rho770 = new Amplitude( "K2_1430_rho770", new Variable("K2_1430_rho770_real", 13.039), new Variable("K2_1430_rho770_imag", -1.935), LS_K2_1430_rho770 , SF_K2_1430_rho770 , 2);
Amplitude* amp_a1_rho770 = new Amplitude( "a1_rho770", new Variable("a1_rho770_real", -0.639), new Variable("a1_rho770_imag", -6.801), LS_a1_rho770, SF_a1_rho770, 2);
Amplitude* amp_a1_f0_600 = new Amplitude( "a1_f0_600", new Variable("a1_f0_600_real", -0.062), new Variable("a1_f0_600_imag", 2.872), LS_a1_f0_600 , SF_a1_f0_600 , 2);
Amplitude* amp_a1_rho770_D = new Amplitude( "a1_rho770_D", new Variable("a1_rho770_D_real", -9.465), new Variable("a1_rho770_D_imag", 15.390), LS_a1_rho770_D, SF_a1_rho770_D, 2);
Amplitude* amp_nonRes = new Amplitude( "nonRes", new Variable("nonRes_real", -0.265), new Variable("nonRes_imag", -0.003), LS_nonRes, SF_nonRes, 2);
Amplitude* amp_WS_K892_rho770_S = new Amplitude("WS_K892_rho770_S", new Variable("WS_K892_rho770_S_real", 1.0), new Variable("WS_K892_rho770_S_imag", 0.0), LS_K892_rho770_S, SF_K892_rho770_S, 2);
Amplitude* amp_WS_K892_rho770_P = new Amplitude("WS_K892_rho770_P", new Variable("WS_K892_rho770_P_real", -0.109), new Variable("WS_K892_rho770_P_imag", 1.653), LS_K892_rho770_P, SF_K892_rho770_P , 2);
Amplitude* amp_WS_K892_rho770_D = new Amplitude("WS_K892_rho770_D", new Variable("WS_K892_rho770_D_real", 25.463), new Variable("WS_K892_rho770_D_imag", 2.662), LS_K892_rho770_D, SF_K892_rho770_D, 2);
Amplitude* amp_WS_rho1450_K0_1430 = new Amplitude("WS_rho1450_K0_1430", new Variable("WS_rho1450_K0_1430_real", 2.353), new Variable("WS_rho1450_K0_1430_imag", -0.234), LS_rho1450_K0_1430 , SF_rho1450_K0_1430 , 2);
Amplitude* amp_WS_K1_1270_K892 = new Amplitude("WS_K1_1270_K892", new Variable("WS_K1_1270_K892_real", -0.035), new Variable("WS_K1_1270_K892_imag", -1.405), LS_K1_1270_K892 , SF_K1_1270_K892 , 2);
Amplitude* amp_WS_K1_1270_rho770 = new Amplitude("WS_K1_1270_rho770", new Variable("WS_K1_1270_rho770_real", 2.42), new Variable("WS_K1_1270_rho770_imag", 2.471), LS_K1_1270_rho770 , SF_K1_1270_rho770 , 2);
Amplitude* amp_WS_K1_1270_K0_1430 = new Amplitude("WS_K1_1270_K0_1430", new Variable("WS_K1_1270_K0_1430_real", -1.990), new Variable("WS_K1_1270_K0_1430_imag", -2.105), LS_K1_1270_K0_1430 , SF_K1_1270_K0_1430 , 2);
Amplitude* amp_WS_K1_1400_K892 = new Amplitude("WS_K1_1400_K892", new Variable("WS_K1_1400_K892_real", -3.347), new Variable("WS_K1_1400_K892_imag", -2.237), LS_K1_1400_K892 , SF_K1_1400_K892 , 2);
Amplitude* amp_WS_nonRes = new Amplitude("WS_nonRes", new Variable("WS_nonRes_real", -0.456), new Variable("WS_nonRes_imag", -0.041), LS_nonRes, SF_nonRes, 2);
//DK3P_DI->amplitudes_B.push_back(amp_K892_rho770_S);
DK3P_DI->amplitudes_B.push_back(amp_K892_rho770_P);
DK3P_DI->amplitudes_B.push_back(amp_K892_rho770_D);
//DK3P_DI->amplitudes_B.push_back(amp_K1410_rho770_P);
//DK3P_DI->amplitudes_B.push_back(amp_K892_f0_600);
//DK3P_DI->amplitudes_B.push_back(amp_rho1450_K0_1430);
//DK3P_DI->amplitudes_B.push_back(amp_K1460_K892);
//DK3P_DI->amplitudes_B.push_back(amp_K1460_f0_1370);
//DK3P_DI->amplitudes_B.push_back(amp_K1_1270_K892);
//DK3P_DI->amplitudes_B.push_back(amp_K1_1270_rho770);
//DK3P_DI->amplitudes_B.push_back(amp_K1_1270_K0_1430);
//DK3P_DI->amplitudes_B.push_back(amp_K1_1400_K892);
//DK3P_DI->amplitudes_B.push_back(amp_NonResA_K892);
//DK3P_DI->amplitudes_B.push_back(amp_K2_1430_K892);
//DK3P_DI->amplitudes_B.push_back(amp_K2_1430_rho770);
//DK3P_DI->amplitudes_B.push_back(amp_a1_rho770);
//DK3P_DI->amplitudes_B.push_back(amp_a1_f0_600);
//DK3P_DI->amplitudes_B.push_back(amp_a1_rho770_D);
//DK3P_DI->amplitudes_B.push_back(amp_nonRes);
//DK3P_DI->amplitudes.push_back(amp_WS_K892_rho770_S);
//DK3P_DI->amplitudes.push_back(amp_WS_K892_rho770_P);
//DK3P_DI->amplitudes.push_back(amp_WS_K892_rho770_D);
//DK3P_DI->amplitudes.push_back(amp_WS_rho1450_K0_1430);
//DK3P_DI->amplitudes.push_back(amp_WS_K1_1270_K892);
//DK3P_DI->amplitudes.push_back(amp_WS_K1_1270_rho770);
//DK3P_DI->amplitudes.push_back(amp_WS_K1_1270_K0_1430);
//DK3P_DI->amplitudes.push_back(amp_WS_K1_1400_K892);
//DK3P_DI->amplitudes.push_back(amp_WS_nonRes);
DK3P_DI->_tau = new Variable("tau", 0.4101);
DK3P_DI->_xmixing = new Variable("xmixing", 0.0049);
DK3P_DI->_ymixing = new Variable("ymixing", 0.0061);
// DK3P_DI->_xmixing = new Variable("xmixing", 0, 0.00001, -.15, .15);
// DK3P_DI->_ymixing = new Variable("ymixing", 0, 0.00001, -.15, .15);
DK3P_DI->_SqWStoRSrate = new Variable("SqWStoRSrate", 1.0/sqrt(300.0));
Variable* m12 = new Variable("m12", 0, 3);
Variable* m34 = new Variable("m34", 0, 3);
Variable* cos12 = new Variable("cos12", -1, 1);
Variable* cos34 = new Variable("m12", -1, 1);
Variable* phi = new Variable("phi", -3.5, 3.5);
Variable* eventNumber = new Variable("eventNumber", 0, INT_MAX);
Variable* dtime = new Variable("dtime", 0, 10);
Variable* sigmat = new Variable("sigmat",-3,3);
Variable* constantOne = new Variable("constantOne", 1);
Variable* constantZero = new Variable("constantZero", 0);
std::vector<Variable*> vars;
vars.push_back(m12);
vars.push_back(m34);
vars.push_back(cos12);
vars.push_back(cos34);
vars.push_back(phi);
vars.push_back(eventNumber);
vars.push_back(dtime);
vars.push_back(sigmat);
UnbinnedDataSet currData(vars);
*DK3P_DI->_xmixing = strtof(argv[5], NULL);
*DK3P_DI->_ymixing = strtof(argv[6], NULL);
vector<Variable*> observables;
vector<Variable*> coefficients;
vector<Variable*> offsets;
observables.push_back(m12);
observables.push_back(m34);
observables.push_back(cos12);
observables.push_back(cos34);
observables.push_back(phi);
observables.push_back(eventNumber);
observables.push_back(dtime);
observables.push_back(sigmat);
offsets.push_back(constantZero);
offsets.push_back(constantZero);
coefficients.push_back(constantOne);
TruthResolution* dat = new TruthResolution();
PolynomialPdf* eff = new PolynomialPdf("constantEff", observables, coefficients, offsets, 0);
TDDP4* dp = new TDDP4("test_TD", observables, DK3P_DI, dat, eff, 0, 1);
//dp->setGenDecayTimeLimit(0,3.5); // this corresponds to roughly 97% of the exponential, so this should be ok, and it speeds up generation significantly compared to [0,5]
TFile *file = new TFile( argv[4] , "RECREATE");
TTree *tree = new TTree("events", "events");
double tm12_2,tm34_2,tc12_2,tc34_2,tphi_2,tdtime_2;
tree->Branch("m12", &tm12_2, "m12/D");
tree->Branch("m34", &tm34_2, "m34/D");
tree->Branch("c12", &tc12_2, "c12/D");
tree->Branch("c34", &tc34_2, "c34/D");
tree->Branch("phi", &tphi_2, "phi/D");
tree->Branch("dtime", &tdtime_2, "dtime/D");
mcbooster::FlagAcceptReject(0,0);
int generatedEvents = 0;
int RunNum = 0;
int BatchSize = strtoul(argv[1], NULL,0);
unsigned int offi = strtoul(argv[3], NULL,0);
unsigned int genEvts =strtoul(argv[2], NULL,0);
double wmax = 0;
mcbooster::FlagAcceptReject FlagIt = mcbooster::FlagAcceptReject(0.1,5);
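// Accept-reject loop: each pass generates a batch of BatchSize candidate events on the GPU,
// tracks the maximum observed weight in wmax (appended to Max_observed_weights.txt below),
// and keeps an event whenever FlagIt(i, weights[i]) accepts it, filling the output TTree
// until genEvts events have been kept.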
while(generatedEvents < genEvts )
{
unsigned int keptEvts = 0;
dp->setGenerationOffset(offi);
auto tuple = dp->GenerateSig(BatchSize);
auto particles = std::get<0>(tuple);
auto variables = std::get<1>(tuple);
auto weights = std::get<2>(tuple);
auto flags = std::get<3>(tuple);
// int accepted = thrust::count_if(flags.begin(), flags.end(), thrust::identity<bool>());
++RunNum;
// generatedEvents += accepted;
for (int i = 0; i < weights.size(); ++i)
{
if (wmax<weights[i]) wmax = weights[i];
if (generatedEvents < genEvts && FlagIt(i,weights[i])){
++generatedEvents;
++keptEvts;
// printf("PF %i: %s %.5g %.5g %.5g %.5g %.5g %.5g\n",i, (bool)flags[i] ? "true" : "false", weights[i], (*(variables[0]))[i], (*(variables[1]))[i], (*(variables[2]))[i], (*(variables[3]))[i], (*(variables[4]))[i]);
tm12_2 = (*(variables[0]))[i];
tm34_2 = (*(variables[1]))[i];
tc12_2 = (*(variables[2]))[i];
tc34_2 = (*(variables[3]))[i];
tphi_2 = (*(variables[4]))[i];
tdtime_2 = (*(variables[5]))[i];
tree->Fill();
// printf("Buffer %i: %.5g %.5g %.5g %.5g %.5g %.5g \n",i, (*myweights)[i],(*Buffer_m12)[i], (*Buffer_m34)[i], (*Buffer_c12)[i], (*Buffer_c34)[i], (*Buffer_phi)[i], (*Buffer_dt)[i]);
}
}
fprintf(stderr,"Run # %i: x=%.6g y=%.6g Using accept-reject method leaves you with %i out of %i events. %.4g %% of Total offset: %u\n",RunNum, *DK3P_DI->_xmixing, *DK3P_DI->_ymixing, keptEvts, BatchSize, generatedEvents*100.0/genEvts, offi);
offi += BatchSize;
delete variables[0];
delete variables[1];
delete variables[2];
delete variables[3];
delete variables[4];
delete variables[5];
delete particles[0];
delete particles[1];
delete particles[2];
delete particles[3];
}
// printf("start\n");
// int i = 0;
// printf("Buffer %i: %.5g %.5g %.5g %.5g %.5g %.5g \n",i, (*myweights)[i],(*Buffer_m12)[i], (*Buffer_m34)[i], (*Buffer_c12)[i], (*Buffer_c34)[i], (*Buffer_phi)[i], (*Buffer_dt)[i]);
// printf("start2\n");
std::ofstream out;
string outname ="Max_observed_weights.txt";
out.open(outname.c_str(), std::ios::app);
out.precision(10);
out << wmax <<endl;
tree->Write();
file->Close();
// printf("overall wmax %f, keept %u evts, reweight ratio %.5g\n",wmax, keptEvts, (double)keptEvts/genEvts );
printf("%i\n",offi);
return 0;
}
|
f9a5bf3605d919ea68317c1a282e93d2847af099.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// 2013 Ehsan Variani
// 2013 Johns Hopkins University (author: Daniel Povey)
// 2013 Hainan Xu
// 2013 Xiaohui Zhang
// 2013-2015 Guoguo Chen
// 2016-2017 Shiyin Kang
// 2017 Hossein Hadian
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// This file contains the CUDA kernels, plus their ANSI-C wrappers
#include <cfloat>
#include <limits>
#include <math_constants.h>
#include "cudamatrix/cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while (nTotalThreads > 1) {
int32_cuda halfPoint = ((1 + nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x >= halfPoint) { // was <
// Get the shared value stored by another thread
Real temp = 0.0;
if (threadIdx.x < nTotalThreads) { // was +halfPoint
temp = buffer[threadIdx.x]; // was +halfPoint
}
buffer[threadIdx.x - halfPoint] += temp;
}
__syncthreads();
nTotalThreads = ((1 + nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
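// Worked example of the reduction above, assuming blockDim.x == 8 and buffer == {1,2,3,4,5,6,7,8}:
// pass 1 (halfPoint == 4): buffer[0..3] becomes {6,8,10,12};
// pass 2 (halfPoint == 2): buffer[0..1] becomes {16,20};
// pass 3 (halfPoint == 1): buffer[0] becomes 36, which is returned as the total sum.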
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
template<typename Real>
__global__
static void _copy_low_upp(Real* A, MatrixDim dimA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i <= j || i >= dimA.rows)
return;
int index_1 = i * dimA.stride + j;
int index_2 = j * dimA.stride + i;
A[index_2] = A[index_1];
}
template<typename Real>
__global__
static void _copy_upp_low(Real* A, MatrixDim dimA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j <= i || j >= dimA.rows)
return;
int index_1 = i * dimA.stride + j;
int index_2 = j * dimA.stride + i;
A[index_2] = A[index_1];
}
// mat = alpha * diag(vec) * mat2 + beta * mat.
template<typename Real>
__global__
static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *vec, const Real *mat2,
int mat2_row_stride, int mat2_col_stride,
Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = j * mat_dim.stride + i, index2 = j * mat2_row_stride
+ i * mat2_col_stride;
if (i < mat_dim.cols && j < mat_dim.rows) {
mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index];
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dmat.cols && j < dmat.rows) {
int32_cuda index_B = (j * (j + 1) / 2) + i;
int32_cuda index_A = j * dmat.stride + i;
if (i <= j) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) {
// we interpret these indexes oppositely from normal, but it doesn't
// matter as it's invoked in a symmetric way.
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
// transpose the indices used to index the source TpMatrix.
if (i < dmat.rows && j < dmat.cols) {
int32_cuda index_B = (j * (j + 1) / 2) + i;
int32_cuda index_A = i * dmat.stride + j;
if (i <= j) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index.
int32_cuda index_out = i + j * d_out.stride;
int32_cuda index_in = i + j * d_in.stride;
if (i < d_out.cols && j < d_out.rows)
mat_out[index_out] = static_cast<Real>(mat_in[index_in]);
}
template<int TileDim, typename Real, typename OtherReal>
__global__
static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in,
MatrixDim d_out, MatrixDim d_in) {
// Use shared mem to achieve both coalesced memory reading and writing
// '+1' to avoid bank conflict when reading sbuf
__shared__ Real sbuf[TileDim][TileDim + 1];
const int32_cuda i_in = blockIdx.y * TileDim + threadIdx.y; // row-index
const int32_cuda j_in = blockIdx.x * TileDim + threadIdx.x; // col-index
const int32_cuda tile_stride_in = CU1DBLOCK / TileDim * d_in.stride;
int32_cuda index_in = i_in * d_in.stride + j_in;
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_in + i < d_in.rows && j_in < d_in.cols) {
sbuf[threadIdx.y + i][threadIdx.x] = static_cast<Real>(mat_in[index_in]);
}
index_in += tile_stride_in;
}
__syncthreads();
// Grid is transposed, but block is not yet.
// Warp (blockDim.x) is always along the row-dim.
const int32_cuda i_out = blockIdx.x * TileDim + threadIdx.y;
const int32_cuda j_out = blockIdx.y * TileDim + threadIdx.x;
const int32_cuda tile_stride_out = CU1DBLOCK / TileDim * d_out.stride;
int32_cuda index_out = i_out * d_out.stride + j_out;
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_out + i < d_out.rows && j_out < d_out.cols) {
// block is transposed when reading sbuf
mat_out[index_out] = sbuf[threadIdx.x][threadIdx.y + i];
}
index_out += tile_stride_out;
}
}
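// A plausible host-side launch for the tiled transpose above (illustrative sketch only; the
// actual ANSI-C wrappers are defined elsewhere in this file). It assumes TileDim == 32 and the
// usual CU1DBLOCK == 256 thread block:
//
// dim3 block(32, CU1DBLOCK / 32); // threadIdx.x runs along a tile row of the input
// dim3 grid((d_in.cols + 31) / 32, (d_in.rows + 31) / 32); // tiles covering the *input* matrix
// _copy_from_mat_trans<32><<<grid, block>>>(mat_out, mat_in, d_out, d_in);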
// Copy from CSR sparse matrix to dense matrix
//
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat(Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const OtherReal* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.rows) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx
mat[i * mat_dim.stride + j] = static_cast<Real>(smat_val[nz_id]);
}
}
}
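// Illustrative launch configuration for the warp-per-row sparse kernels in this file, following
// the "CU1DBLOCK/warpSize rows per thread block, 1D grid over rows" scheme described above
// (warp_size == 32 is an assumption of this sketch; the device warpSize should be used in practice):
//
// dim3 block(32, CU1DBLOCK / 32); // threadIdx.x walks the nonzeros of one row
// dim3 grid((mat_dim.rows + block.y - 1) / block.y); // one blockDim.y slot per row
// _copy_from_smat<<<grid, block>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val);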
/// Select a subset of the rows of a CSR SparseMatrix.
/// Sets 'out' to only the rows of 'in' that are listed
/// in 'row_indexes'. 'row_indexes' must be sorted and unique,
/// and satisfy 0 <= row_indexes[i] < in.size().
///
/// Note: 'out_row_ptr' is an input parameter that is calculated before
/// calling this kernel function
///
/// We use warpSize threads per row to access only the nnz elements.
/// Every CU1DBLOCK/warpSize rows share one thread block.
/// 1D grid to cover all selected rows.
template<typename Real>
__global__
static void _select_rows(const int* out_row_ptr, int* out_col_idx,
Real* out_val, const int* row_indexes,
const int num_selected_rows, const int* in_row_ptr,
const int* in_col_idx, const Real* in_val) {
const int out_i = blockIdx.x * blockDim.y + threadIdx.y; // out row idx
if (out_i < num_selected_rows) {
const int in_i = row_indexes[out_i];
const int in_row_start = in_row_ptr[in_i];
const int out_row_start = out_row_ptr[out_i];
const int row_length = in_row_ptr[in_i + 1] - in_row_start;
for (int k = threadIdx.x; k < row_length; k += warpSize) {
const int in_n = in_row_start + k;
const int out_n = out_row_start + k;
out_col_idx[out_n] = in_col_idx[in_n];
out_val[out_n] = in_val[in_n];
}
}
}
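// Sketch of how 'out_row_ptr' could be precomputed before launching _select_rows (illustrative
// only; the caller may equally well do this with a GPU prefix sum). It is simply an exclusive
// scan of the selected row lengths:
//
// out_row_ptr[0] = 0;
// for (int r = 0; r < num_selected_rows; ++r) {
// const int in_i = row_indexes[r];
// out_row_ptr[r + 1] = out_row_ptr[r] + (in_row_ptr[in_i + 1] - in_row_ptr[in_i]);
// }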
// mat += alpha * smat
//
// We use warpSize threads per row to access only the nonzero elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _add_smat(Real* mat, MatrixDim mat_dim, Real alpha,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.rows) {
const int row_start = smat_row_ptr[i];
const int row_end = smat_row_ptr[i + 1];
for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) {
const int j = smat_col_idx[n]; // col idx of smat
mat[i * mat_dim.stride + j] += alpha * smat_val[n];
}
}
}
// mat += alpha * smat^T
//
// We use warpSize threads per row to access only the nonzero elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _add_smat_trans(Real* mat, MatrixDim mat_dim, Real alpha,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.cols) {
const int row_start = smat_row_ptr[i];
const int row_end = smat_row_ptr[i + 1];
for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) {
const int j = smat_col_idx[n]; // col idx of smat
mat[j * mat_dim.stride + i] += alpha * smat_val[n];
}
}
}
/// For each element x of the matrix, set it to
/// (x < 0 ? exp(x) : x + 1).
/// Use block/grid sizes for simple matrix ops
template<typename T>
__global__
static void _apply_exp_special(T* out, MatrixDim out_dim, const T* in,
int in_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < out_dim.rows && j < out_dim.cols) {
T x = in[i * in_stride + j];
if (x < T(0)) {
out[i * out_dim.stride + j] = exp(x);
} else {
out[i * out_dim.stride + j] = x + T(1);
}
}
}
/// Fill the array 'data' with the sequence [base ... base + length)
/// Use 1D block and 1D grid
template<typename T>
__global__
static void _sequence(T* data, int length, T base) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < length) {
data[i] = base + T(i);
}
}
// Copy from CSR sparse matrix to transposed dense matrix
//
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat_trans(Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr,
const int* smat_col_idx,
const OtherReal* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.cols) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
mat[j * mat_dim.stride + i] = static_cast<Real>(smat_val[nz_id]);
}
}
}
// First stage of trace(mat * smat^T)
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _trace_mat_smat_trans(const Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr,
const int* smat_col_idx, const Real* smat_val,
Real* trace_vec) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.rows) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
trace_vec[nz_id] = mat[i * mat_dim.stride + j] * smat_val[nz_id];
}
}
}
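// Note: the products written to trace_vec above are only the first stage; the trace itself is
// presumably obtained by a second-stage sum of trace_vec over all nnz entries, done by the caller.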
// First stage of trace(mat * smat)
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _trace_mat_smat(const Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val, Real* trace_vec) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.cols) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
trace_vec[nz_id] = mat[j * mat_dim.stride + i] * smat_val[nz_id];
}
}
}
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
mat[index] = exp(mat[index]);
}
}
template<typename Real>
__global__
static void _scale_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = value * mat[index];
}
}
template<typename Real>
__global__
static void _set_diag(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = i + i * d.stride;
if (i < d.rows && i < d.cols) {
mat[index] = value;
}
}
template<typename Real>
__global__
static void _set_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = value;
}
}
template<typename Real>
__global__
static void _add_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = mat[index] + value;
}
}
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = value;
}
template<typename Real>
__global__
static void _set_zero_above_diag(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < i)
mat[index] = 0.0;
}
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = mat[index] + value;
}
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = mat[index] * value;
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows)
mat[dst_index] = mat[dst_index] * A[src_index];
}
template<typename Real>
__global__
static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows)
mat[dst_index] = mat[dst_index] / A[src_index];
}
template<typename Real>
__global__
static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows) {
Real a = mat[dst_index], b = A[src_index];
mat[dst_index] = fmax(a, b);
}
}
template<typename Real>
__global__
static void _min(Real* mat, const Real* other, MatrixDim mat_d,
int other_stride) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda mat_index = i * mat_d.stride + j;
int32_cuda other_index = i * other_stride + j;
if (j < mat_d.cols && i < mat_d.rows) {
Real a = mat[mat_index], b = other[other_index];
mat[mat_index] = fmin(a, b);
}
}
template<typename Real>
__global__
static void _vec_mul_elements(Real* v, const Real* a, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim)
v[i] = v[i] * a[i];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[j];
}
template<typename Real>
__global__
static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d,
int src_stride, int group_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols) {
int dst_index = i + j * d.stride;
int src_index = i / group_size + j * src_stride;
y[dst_index] *= x[src_index];
}
}
template<typename Real>
__global__
void _diff_group_pnorm(Real *id, const Real *iv, const Real *ov, const Real* od,
MatrixDim id_dim, int iv_stride, int ov_stride,
int od_stride, int group_size, Real power) {
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < id_dim.cols) {
const int grid_stride = gridDim.y * blockDim.y;
const int src_j = j / group_size;
int i = blockIdx.y * blockDim.y + threadIdx.y;
for (; i < id_dim.rows; i += grid_stride) {
const int iv_index = j + i * iv_stride;
Real iv_ij = iv[iv_index];
Real ans;
if (power == Real(2)) {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
ans = ov_ij <= 0.0 ? 0.0 : iv_ij / ov_ij;
} else if (power == Real(1)) {
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
ans = (iv_ij == Real(0) ? 0.0 : iv_ij_sign);
} else if (power
== (sizeof(Real) == sizeof(float) ? CUDART_INF_F : CUDART_INF)) {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
ans =
ov_ij <= 0.0 ?
0.0 : (iv_ij_sign * (abs(iv_ij) == ov_ij ? 1.0 : 0.0));
} else {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
if (ov_ij <= 0.0) {
ans = 0.0; // The derivative is either 0 or undefined at the origin.
} else {
ans = iv_ij_sign * pow(std::abs(iv_ij), power - 1)
* pow(ov_ij, 1 - power);
}
}
const int od_index = src_j + i * od_stride;
const int id_index = j + i * id_dim.stride;
id[id_index] = ans * od[od_index];
}
}
}
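// Note: the general-power branch above is the analytic derivative of the group p-norm
// y = (sum_k |x_k|^p)^(1/p), namely dy/dx_i = sign(x_i) * |x_i|^(p-1) * y^(1-p); the
// power == 1, power == 2 and power == infinity branches are the simplified special cases
// of the same formula (with the derivative taken as 0 where y <= 0 or at points where it
// is not defined), and the result is then multiplied by the output derivative od.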
/// deriv is the derivative we will output; vec is the input we're computing
/// the group max on, "maxv" is the previously computed group max.
template<typename Real>
__global__
static void _calc_group_max_deriv(Real *deriv, const Real *vec,
const Real *maxv, MatrixDim deriv_dim,
int vec_stride, int maxv_stride,
int group_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < deriv_dim.rows && i < deriv_dim.cols) {
int deriv_index = i + j * deriv_dim.stride;
int vec_index = i + j * vec_stride;
int maxv_index = i / group_size + j * maxv_stride;
Real vec_element = vec[vec_index], // The element of the original vector.
max_element = maxv[maxv_index]; // this is the max value
Real ans = (max_element == vec_element ? 1.0 : 0.0);
deriv[deriv_index] = ans;
}
}
/// Set each element to y = (x == orig ? changed : x).
template<typename Real>
__global__
static void _replace_value(Real *vec, int dim, Real orig, Real changed) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim)
if (vec[i] == orig)
vec[i] = changed;
}
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
const int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
if (i < d.rows) {
const int32_cuda start = i * d.stride;
const Real scale = Real(1) / vec_div[i];
const int32_cuda grid_stride = blockDim.x * gridDim.x;
for (int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; j < d.cols; j +=
grid_stride) {
mat[start + j] *= scale;
}
}
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int32_cuda index = i + j * d.stride;
int32_cuda index_src = i + j * src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * src[index_src] + dst[index];
}
template<typename Real>
__global__
static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = j + i * src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * src[index_src] + dst[index];
}
template<typename Real>
__global__
static void _add_mat_blocks(Real alpha, const Real* src,
int32_cuda num_row_blocks,
int32_cuda num_col_blocks, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = i + j * src_stride;
if (i < d.cols && j < d.rows)
for (int32_cuda p = 0; p < num_row_blocks; p++) {
for (int32_cuda q = 0; q < num_col_blocks; q++) {
dst[index] = alpha
* src[index_src + p * src_stride * d.rows + q * d.cols]
+ dst[index];
}
}
}
template<typename Real>
__global__
static void _add_mat_repeated(Real alpha, const Real* src,
MatrixDim src_dim, Real* dst,
MatrixDim dst_dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda src_i = i % src_dim.cols,
src_j = j % src_dim.rows,
dst_index = i + j * dst_dim.stride,
src_index = src_i + src_j * src_dim.stride;
if (i < dst_dim.cols && j < dst_dim.rows)
dst[dst_index] += alpha * src[src_index];
}
template<typename Real>
__global__
static void _add_mat_blocks_trans(Real alpha, const Real* src,
int32_cuda num_row_blocks,
int32_cuda num_col_blocks, Real* dst,
MatrixDim d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = j + i * src_stride;
if (i < d.cols && j < d.rows)
for (int32_cuda p = 0; p < num_row_blocks; p++) {
for (int32_cuda q = 0; q < num_col_blocks; q++) {
dst[index] = alpha
* src[index_src + p * src_stride * d.cols + q * d.rows]
+ dst[index];
}
}
}
template<typename Real>
__global__
static void _set_mat_mat_div_mat(const Real* A, const Real* B, const Real* C,
Real* dst, MatrixDim d, int stride_a,
int stride_b, int stride_c) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride, a_index = i + j * stride_a, b_index = i
+ j * stride_b, c_index = i + j * stride_c;
if (i < d.cols && j < d.rows)
if (C[c_index] == 0)
dst[index] = A[a_index];
else
dst[index] = A[a_index] * B[b_index] / C[c_index];
}
// Given a matrix input S (not packed!) and a lower-triangular matrix L, this
// function does S = beta S + alpha * L^T L. This is used in PSD matrix
// inversion. The i index is the row of the destination S and the j the column
// (although of course the output is symmetric so it doesn't matter in a sense).
// The main point of this is to make use of various symmetries and zero-ness.
template<typename Real>
__global__
static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim,
Real *S, MatrixDim sdim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= sdim.rows || j > i)
return;
// this thread computes the dot-product of the i'th column of
// L with the j'th column of L. The values we're multiplying
// are only nonzero for row-index k greater or equal to
// max(i, j), which equals i.
Real sum = 0.0;
for (int k = i; k < sdim.rows; k++) {
int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k;
sum += T[i_index] * T[j_index];
}
int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i;
S[output_index1] = alpha * sum + beta * S[output_index1];
S[output_index2] = alpha * sum + beta * S[output_index2];
}
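// In index form, the thread for (i, j) above computes
// S(i, j) = alpha * sum_{k >= i} T(k, i) * T(k, j) + beta * S(i, j), with j <= i,
// i.e. S = beta * S + alpha * T^T T; the sum may start at k = i = max(i, j) because T is
// lower-triangular, and the result is written to both (i, j) and (j, i) to keep S symmetric.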
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst,
MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * col[j] + beta * dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst,
MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * row[i] + beta * dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat,
MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * dmat.stride;
int32_cuda index2 = i + j * dmask.stride;
if (i < dmat.cols && j < dmat.rows)
if (mask[index2] == 0)
mat[index] = 0;
}
template<typename Real>
__global__
static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *mat2, int mat2_row_stride,
int mat2_col_stride, const Real *vec, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * mat_dim.stride, index2 = i * mat2_col_stride
+ j * mat2_row_stride;
if (j < mat_dim.rows && i < mat_dim.cols)
mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index];
}
template<typename Real>
__global__
static void _add_mat_mat_elements(Real *data, const Real *srcA_data,
const Real *srcB_data, MatrixDim dim,
int srcA_stride, int srcB_stride, Real alpha,
Real beta) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda tgt_index = i + j * dim.stride;
int32_cuda srcA_index = i + j * srcA_stride;
int32_cuda srcB_index = i + j * srcB_stride;
if (i < dim.cols && j < dim.rows) {
data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index]
+ beta * data[tgt_index];
}
}
/*
* CuVector
*/
// very limited application!
template<typename Real>
__global__
static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2,
Real param_3, int* flag, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
Real ratio = a[i] / param_3;
if ((ratio < 0.0) || (ratio >= 1.01)) {
*flag = 1;
return;
}
if (ratio < param_1) {
Real factor = ((param_1 / ratio) > param_2) ? param_2 : (param_1 / ratio);
v[i] = v[i] / factor;
} else if (ratio > param_1) {
Real factor = ((ratio / param_1) > param_2) ? param_2 : (ratio / param_1);
v[i] = v[i] * factor;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _cublas_copy_kaldi(int n, const Real* x, int incx, OtherReal* y,
int incy) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
y[i * incy] = static_cast<OtherReal>(x[i * incx]);
}
}
// This kernel writes a copy of the vector "v_in" to each row of the matrix
// "m_out". the dimension of v_in should be equal to the #columns of m_out.
template<typename Real>
__global__
static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index.
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index.
if (i < d.cols && j < d.rows) {
int index = i + j * d.stride;
m_out[index] = v_in[i];
}
}
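// Usage sketch for _copy_rows_from_vec (illustrative only; the block shape
// and pointer names below are assumptions). The grid just has to cover every
// (column, row) element of m_out:
//
//   dim3 block(16, 16);
//   dim3 grid((d.cols + block.x - 1) / block.x,
//             (d.rows + block.y - 1) / block.y);
//   _copy_rows_from_vec<<<grid, block>>>(m_out, d, v_in);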
// This kernel writes a copy of the vector "v_in" to each column of the matrix
// "m_out". The dimension of v_in should be equal to the #rows of m_out.
template<typename Real>
__global__
static void _copy_cols_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
int i = blockIdx.y * blockDim.y + threadIdx.y; // row id
int j = blockIdx.x * blockDim.x + threadIdx.x; // col id
if (i < d.rows && j < d.cols) {
m_out[i * d.stride + j] = v_in[i];
}
}
// _trace_mat_mat reduces the partial sum to
// value[blockIdx.y * gridDim.x + blockIdx.x].
// It uses shared mem to transpose matrix B to ensure coalesced memory access.
template<int TileDim, typename Real>
__global__
static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA,
int B_stride, Real* value) {
// Reuse shared mem and make indexing easier. "+1" to avoid bank conflict
__shared__ union {
Real trans[TileDim][TileDim + 1];
Real sum[CU1DBLOCK];
} smem;
// linear thread id;
const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x;
const int32_cuda grid_height = gridDim.y * TileDim;
const int32_cuda ja = blockIdx.x * TileDim + threadIdx.x;
const int32_cuda ib = blockIdx.x * TileDim + threadIdx.y;
int32_cuda ia = blockIdx.y * TileDim + threadIdx.y;
int32_cuda jb = blockIdx.y * TileDim + threadIdx.x;
// Grid reduce
Real tsum = Real(0);
for (int32_cuda i0 = 0; i0 < dA.rows; i0 += grid_height) {
// Load from B, transpose the block and store in shared mem
if (jb < dA.rows) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (ib + i < dA.cols) {
smem.trans[threadIdx.x][threadIdx.y + i] =
B[(ib + i) * B_stride + jb];
}
}
}
__syncthreads();
// Load from A, sum up the product.
if (ja < dA.cols) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (ia + i < dA.rows) {
tsum += A[(ia + i) * dA.stride + ja]
* smem.trans[threadIdx.y + i][threadIdx.x];
}
}
}
__syncthreads();
ia += grid_height;
jb += grid_height;
}
smem.sum[tid] = tsum;
__syncthreads();
// Block reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
smem.sum[tid] += smem.sum[tid + shift];
__syncthreads();
}
// Warp reduce. Implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem.sum[tid] += smem.sum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
value[blockIdx.y * gridDim.x + blockIdx.x] = smem.sum[0];
}
}
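// Host-side sketch of how _trace_mat_mat could be driven (an assumption for
// illustration, not a description of this file's callers). The kernel writes
// one partial sum per thread block into 'value', so a second reduction over
// grid.x * grid.y elements (on one block, or on the host) gives the trace:
//
//   const int kTileDim = 32;                    // must divide CU1DBLOCK
//   dim3 block(kTileDim, CU1DBLOCK / kTileDim);
//   dim3 grid((dA.cols + kTileDim - 1) / kTileDim,
//             (dA.rows + kTileDim - 1) / kTileDim);
//   _trace_mat_mat<kTileDim><<<grid, block>>>(A, B, dA, B_stride, value);
//   // ...then sum the grid.x * grid.y partial results in 'value'.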
// _trace_mat_mat_trans reduces the partial sum to
// value[blockIdx.y * gridDim.x + blockIdx.x].
template<typename Real>
__global__
static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA,
int B_stride, Real* value) {
__shared__ Real ssum[CU1DBLOCK];
// linear thread id;
const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x;
const int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
const int32_cuda grid_height = gridDim.y * blockDim.y;
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
// Grid reduce
Real tsum = Real(0);
if (j < dA.cols) {
while (i < dA.rows) {
tsum += A[i * dA.stride + j] * B[i * B_stride + j];
i += grid_height;
}
}
ssum[tid] = tsum;
__syncthreads();
// Block reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Warp reduce. Implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
value[blockIdx.y * gridDim.x + blockIdx.x] = ssum[0];
}
}
// v = alpha * diag(M * N^T) + beta * v
template<typename Real>
__global__
static void _add_diag_mat_mat_MNT(const Real alpha, const Real* M,
const MatrixDim dim_M, const Real* N,
const int stride_N, const Real beta,
Real* v) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int m_start = i * dim_M.stride;
const int n_start = i * stride_N;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < dim_M.cols; j += CU1DBLOCK) {
tsum += M[m_start + j] * N[n_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
v[i] = alpha * ssum[0] + beta * v[i];
}
}
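// Launch sketch for _add_diag_mat_mat_MNT (illustrative; pointer names are
// assumptions). One CU1DBLOCK-thread block processes one row of M (and the
// matching row of N), so the grid is 1-D over the number of rows:
//
//   _add_diag_mat_mat_MNT<<<dim_M.rows, CU1DBLOCK>>>(
//       alpha, M_data, dim_M, N_data, stride_N, beta, v_data);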
// v = alpha * diag(M^T * N) + beta * v
template<int TileDim, typename Real>
__global__
static void _add_diag_mat_mat_MTN(const Real alpha, const Real* M,
const int stride_M, const Real* N,
const MatrixDim dim_N, const Real beta,
Real* v) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= dim_N.cols)
return;
// Loop along the matrix column.
// Reduce to CU1DBLOCK / TileDim elements per column.
Real tsum = Real(0);
for (int i = threadIdx.y; i < dim_N.rows; i += blockDim.y) {
tsum += M[i * stride_M + j] * N[i * dim_N.stride + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize / TileDim elements per column.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim;
shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element per column.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift >= TileDim; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output TileDim sums per thread block
if (tid < TileDim) {
v[j] = alpha * ssum[tid] + beta * v[j];
}
}
// v = alpha * diag(M * N) + beta * v
template<int TileDim, typename Real>
__global__
static void _add_diag_mat_mat_MN(const Real alpha, const Real* M,
const int stride_M, const Real* N,
const MatrixDim dim_N, const Real beta,
Real* v) {
// Reuse shared mem and make indexing easier. "+1" to avoid bank conflict
__shared__ union {
Real trans[TileDim][TileDim + 1];
Real sum[CU1DBLOCK];
} smem;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int i_m = blockIdx.x * TileDim + threadIdx.y;
const int j_n = blockIdx.x * TileDim + threadIdx.x;
int i_n = threadIdx.y;
int j_m = threadIdx.x;
// Loop along the matrix column.
// Reduce to CU1DBLOCK / TileDim elements per column.
Real tsum = Real(0);
for (int block_i_n = 0; block_i_n < dim_N.rows; block_i_n += TileDim) {
// Load, transpose and store M to shared mem.
if (j_m < dim_N.rows) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_m + i < dim_N.cols) {
smem.trans[threadIdx.x][threadIdx.y + i] = M[(i_m + i) * stride_M
+ j_m];
}
}
}
__syncthreads();
// Load N, sum up the product.
if (j_n < dim_N.cols) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_n + i < dim_N.rows) {
tsum += N[(i_n + i) * dim_N.stride + j_n]
* smem.trans[threadIdx.y + i][threadIdx.x];
}
}
}
__syncthreads();
i_n += TileDim;
j_m += TileDim;
}
smem.sum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize / TileDim elements per column.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim;
shift >>= 1) {
if (tid < shift) {
smem.sum[tid] += smem.sum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element per column.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift >= TileDim; shift >>= 1) {
smem.sum[tid] += smem.sum[tid + shift];
}
}
// output TileDim sums per thread block
if (tid < TileDim && j_n < dim_N.cols) {
v[j_n] = alpha * smem.sum[tid] + beta * v[j_n];
}
}
template<typename Real>
__global__
static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y,
Real beta, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = alpha * x[i] * y[i] + beta * v[i];
}
template<typename Real>
__global__
static void _copy_col_from_mat_df(double* v, int col, const Real* mat,
MatrixDim dmat, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = col + i * dmat.stride;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = (double) mat[index];
}
template<typename Real>
__global__
static void _copy_col_from_mat_fd(float* v, int col, const Real* mat,
MatrixDim dmat, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = col + i * dmat.stride;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = (float) mat[index];
}
template<typename Real>
__global__
static void _vec_apply_exp(Real* v, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim) {
v[i] = exp(v[i]);
}
}
template<typename Real>
__global__
static void _vec_apply_log(Real* v, Real* flag, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim) {
if (v[i] < 0) {
*flag = 1;
return;
}
v[i] = log(v[i]);
}
}
template<typename Real>
__global__
static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z,
MatrixDim d, Real* z2, MatrixDim d2, Real* t) {
int i = threadIdx.x;
__shared__ Real tot_objf[CU1DBLOCK];
__shared__ Real tot_weight[CU1DBLOCK];
Real tmp_weight_sum = 0;
Real tmp_tot_objf = 0;
  int size = s / CU1DBLOCK; // minimum number of elements handled per thread
  int threshold = s - size * CU1DBLOCK; // threads below this index handle one extra element
int loop_start;
int loop_end;
if (i < threshold) {
loop_start = i * (size + 1);
loop_end = (i + 1) * (size + 1);
} else {
loop_start = threshold + i * size;
loop_end = threshold + (i + 1) * size;
}
for (int j = loop_start; j < loop_end; j++) {
//* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) );
int m = (x + j)->row;
//*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int));
int label = (x + j)->column;
// *(Real*) ((size_t)x + j*(2*sizeof(int) + sizeof(Real)) + 2*sizeof(int));
Real weight = (x + j)->weight;
tmp_weight_sum += weight;
Real this_prob = *(z + m * d.stride + label);
tmp_tot_objf += weight * log(this_prob);
// there might be problems here....
*(z2 + m * d2.stride + label) += weight / this_prob;
}
tot_objf[i] = tmp_tot_objf;
tot_weight[i] = tmp_weight_sum;
__syncthreads();
*t = _sum_reduce(tot_objf);
__syncthreads();
*(t + 1) = _sum_reduce(tot_weight);
return;
}
template<typename Real>
__global__
static void _cuda_vector_copy_elements(Real *data, int dim,
const Real *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= dim)
return;
int j = elements[i];
int mat_index;
if (transpose)
mat_index = i + j * mat_stride;
else
mat_index = j + i * mat_stride;
data[i] = src_mat[mat_index];
}
template<typename Real>
__global__
static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha,
MatrixElement<Real>* x,
int num_elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= num_elements)
return;
data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight;
}
template<typename Real>
__global__
static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha,
const Int32Pair* indices,
const Real* x, int s, Real* data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= s)
return;
int data_i = indices[i].first * dim.stride + indices[i].second;
data[data_i] += alpha * x[i];
}
template<typename Real>
__global__
static void _cuda_matrix_add_to_elements(Real alpha,
Real* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < dim.rows) {
int col = elements[row];
if (col >= 0) {
int index = col + row * dim.stride;
mat[index] += alpha;
}
}
}
template<typename Real>
__global__
static void _matrix_lookup(const Real *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
Real *output) {
int ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= indices_size)
return;
int data_ind = indices[ind].first * dim.stride + indices[ind].second;
output[ind] = data[data_ind];
}
template<typename Real>
__global__
static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
int32_cuda index_mat1 = i + j * mat1_dim.stride;
int32_cuda index_mat2 = i + j * mat2_stride;
int32_cuda index_mask = i + j * mask_stride;
if (i < mat1_dim.cols && j < mat1_dim.rows)
mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0);
}
enum EnumTransformReduce {
SUMAB, SUM, MAX, MIN, LINFNORM, L2NORM, L1NORM, L0NORM, LPNORM
};
template<EnumTransformReduce TransReduceType, typename Real>
struct TransReduceOp {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return Real(0);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return Real(0);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return Real(0);
}
};
template<typename Real>
struct TransReduceOp<SUMAB, Real> {
const Real alpha_;
const Real beta_;
TransReduceOp(const Real& a, const Real& b) :
alpha_(a), beta_(b) {
}
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
if (beta_ == Real(0)) {
return alpha_ * x;
} else {
return alpha_ * x + beta_ * output;
}
}
};
template<typename Real>
struct TransReduceOp<SUM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<MAX, Real> {
__forceinline__
__device__ Real InitValue() const {
return sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<MIN, Real> {
__forceinline__
__device__ Real InitValue() const {
return sizeof(Real) == sizeof(float) ? CUDART_INF_F : CUDART_INF;
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return min(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<LINFNORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return abs(x);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<L2NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x * x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return sqrt(x);
}
};
template<typename Real>
struct TransReduceOp<L1NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return abs(x);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<L0NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return Real(x == Real(0) ? 0 : 1);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<LPNORM, Real> {
const Real power_;
TransReduceOp(const Real& p) :
power_(p) {
}
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return pow(abs(x), power_);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return pow(x, Real(1) / power_);
}
};
// Vector reduce.
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _vec_transform_reduce(
const Real* v, Real* result, const int dim, const int inc,
const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sdata[CU1DBLOCK];
Real tdata = op.InitValue();
const int tid = threadIdx.x;
const int vec_len = dim * inc;
const int grid_stride = gridDim.x * blockDim.x * inc;
int i = (blockIdx.x * blockDim.x + tid) * inc;
// Grid reduce. Loop over the whole vector v.
for (; i < vec_len; i += grid_stride) {
tdata = op.Reduce(tdata, op.Transform(v[i]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
__syncthreads();
}
// Reduce last warp. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
}
// Output to vector result.
if (tid == 0)
result[blockIdx.x] = op.PostReduce(sdata[0], result[blockIdx.x]);
}
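// Sketch of how the transform-reduce ops compose with this kernel (an
// assumption for illustration; the two-pass scheme and the names below are
// not taken from this file). With the SUM op, pass 1 leaves one partial sum
// per block in 'partial', and a single-block pass 2 folds those into 'result':
//
//   const int num_blocks = (dim + CU1DBLOCK - 1) / CU1DBLOCK;
//   _vec_transform_reduce<<<num_blocks, CU1DBLOCK>>>(
//       v, partial, dim, 1, TransReduceOp<SUM, float>());
//   _vec_transform_reduce<<<1, CU1DBLOCK>>>(
//       partial, result, num_blocks, 1, TransReduceOp<SUM, float>());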
// Reduce a matrix 'mat' to a column vector 'result'
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _transform_reduce_mat_cols(
Real *result, const Real *mat, const MatrixDim d,
const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sdata[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int row_start = i * d.stride;
Real tdata = op.InitValue();
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tdata = op.Reduce(tdata, op.Transform(mat[row_start + j]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
__syncthreads();
}
// Reduce last warp. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1)
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
// Output to vector result.
if (tid == 0) {
result[i] = op.PostReduce(sdata[0], result[i]);
}
}
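// Launch sketch for _transform_reduce_mat_cols (illustrative; 'row_sums' and
// 'mat' are assumed names). One CU1DBLOCK-thread block reduces one row, so
// the result vector gets d.rows elements, e.g. per-row sums:
//
//   _transform_reduce_mat_cols<<<d.rows, CU1DBLOCK>>>(
//       row_sums, mat, d, TransReduceOp<SUM, float>());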
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _group_transform_reduce(
Real *y, const Real *x, const MatrixDim d, const int src_stride,
const int group_size, const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sreduction[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * src_stride;
const int y_start = i * d.stride;
const int threads_per_group = blockDim.x;
// Reduce n groups per thread block
const int n = blockDim.y;
const int len = group_size * n;
// linear thread id
const int tid = threadIdx.y * threads_per_group + threadIdx.x;
int j = threadIdx.y * group_size + threadIdx.x; // col-id of *x
int group_id = threadIdx.y; // col-id of *y
int group_end = x_start + (group_id + 1) * group_size;
while (group_id < d.cols) {
// reduce to threads_per_group elements per group
int x_idx = x_start + j;
Real treduction = op.Transform(x[x_idx]);
x_idx += threads_per_group;
while (x_idx < group_end) {
treduction = op.Reduce(treduction, op.Transform(x[x_idx]));
x_idx += threads_per_group;
}
sreduction[tid] = treduction;
if (threads_per_group > warpSize) {
__syncthreads();
}
// tree-reduce to 2x warpSize elements per group
# pragma unroll
for (int shift = threads_per_group / 2; shift > warpSize; shift >>= 1) {
if (threadIdx.x < shift) {
sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]);
}
__syncthreads();
}
// Warp-reduce to 1 element per group.
// Threads implicitly synchronized within the warp.
const int warp_reduce_size =
threads_per_group / 2 < warpSize ? threads_per_group / 2 : warpSize;
if (threadIdx.x < warp_reduce_size) {
# pragma unroll
for (int shift = warp_reduce_size; shift > 0; shift >>= 1) {
sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]);
}
}
// Store the result.
if (threadIdx.x == 0) {
y[y_start + group_id] = op.PostReduce(sreduction[tid],
y[y_start + group_id]);
}
j += len;
group_end += len;
group_id += n;
}
}
template<typename Real>
__global__
static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
if (v[i] < floor_val) {
v[i] = floor_val;
count[i] = 1;
} else {
count[i] = 0;
}
}
}
template<typename Real>
__global__
static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count,
int dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
if (v[i] > ceiling_val) {
v[i] = ceiling_val;
count[i] = 1;
} else {
count[i] = 0;
}
}
}
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (power == 1.0)
return;
if (power == 2.0) {
mat[index] = mat[index] * mat[index];
} else if (power == 0.5) {
if (!(mat[index] >= 0.0))
return;
mat[index] = sqrt(mat[index]);
} else {
mat[index] = pow(mat[index], power);
}
}
}
template<typename Real>
__global__
static void _apply_pow_abs(Real* mat, Real power, bool include_sign,
MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (include_sign == true && mat[index] < 0) {
if (power == 1.0)
mat[index] = -std::abs(mat[index]);
if (power == 2.0) {
mat[index] = -mat[index] * mat[index];
} else if (power == 0.5) {
mat[index] = -sqrt(std::abs(mat[index]));
} else {
mat[index] = -pow(std::abs(mat[index]), power);
}
} else {
if (power == 1.0)
mat[index] = std::abs(mat[index]);
if (power == 2.0) {
mat[index] = mat[index] * mat[index];
} else if (power == 0.5) {
mat[index] = sqrt(std::abs(mat[index]));
} else if (power < 0.0 && mat[index] == 0.0) {
mat[index] = 0.0;
} else {
mat[index] = pow(std::abs(mat[index]), power);
}
}
}
}
template<typename Real>
__global__
static void _apply_heaviside(Real* mat, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0);
}
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (mat[index] < floor_val)
mat[index] = floor_val;
}
}
template<typename Real>
__global__
static void _copy_cols(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[i], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = j * src_stride + reorder[i];
Real val = src[src_index];
dst[dst_index] = val;
} else {
dst[dst_index] = 0.0;
}
}
}
template<typename Real>
__global__
static void _add_cols(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[i], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = j * src_stride + index;
Real val = src[src_index];
dst[dst_index] += val;
}
}
}
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[j], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = reorder[j] * src_stride + i;
Real val = src[src_index];
dst[dst_index] = val;
} else {
dst[dst_index] = 0;
}
}
}
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real * const *src, MatrixDim dst_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
const Real *pointer = src[j];
if (pointer != NULL) {
dst[dst_index] = pointer[i];
} else {
dst[dst_index] = 0;
}
}
}
template<typename Real>
__global__
static void _copy_to_rows(Real* const * dst, const Real *src,
MatrixDim src_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < src_dim.cols && j < src_dim.rows) {
Real *pointer = dst[j];
if (pointer != NULL) {
pointer[i] = src[j * src_dim.stride + i];
}
}
}
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (reorder[j] >= 0) {
int src_index = reorder[j] * src_stride + i;
dst[dst_index] += alpha * src[src_index];
}
}
}
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real * const *src,
MatrixDim dst_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (src[j] != NULL) {
dst[dst_index] += alpha * src[j][i];
}
}
}
template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim src_dim,
int dst_stride) {
int c = blockIdx.x * blockDim.x + threadIdx.x; // col index
int r = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (c < src_dim.cols && r < src_dim.rows) {
int src_index = r * src_dim.stride + c;
if (reorder[r] >= 0) {
int dst_index = reorder[r] * dst_stride + c;
dst[dst_index] += alpha * src[src_index];
}
}
}
template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* const * dst, const Real *src,
MatrixDim src_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < src_dim.cols && j < src_dim.rows) {
if (dst[j] != NULL) {
dst[j][i] += alpha * src[j * src_dim.stride + i];
}
}
}
template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (mat[index] > ceiling_val)
mat[index] = ceiling_val;
}
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j * d.stride;
if (i < d.cols && j < d.rows)
data[index] = 1.0 / data[index];
}
// matrix-wise, do data = alpha * A * B^T + beta * data,
// where B is a block matrix.
template<typename Real>
__global__
static void _add_mat_blockmat_trans(Real *data, MatrixDim dim,
const Real *A_data, int A_num_rows,
int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &cu_data = B_cu_data[j];
// BT means B transposed.
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset,
BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols =
cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride;
// Cast from void;
const Real *B_data = static_cast<Real*>(cu_data.matrix_data);
// we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
for (int k = 0; k < BT_num_cols; k++) {
const Real *this_BT_col = B_data + k * BT_col_stride;
const Real *this_A_row = A_data + i * A_row_stride
+ BT_row_start * A_col_stride;
// this_A_row points to the element A[i][BT_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
sum += this_BT_col[l] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + BT_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
template<typename Real>
__global__
static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data,
int A_num_rows, int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &block_data = B_cu_data[j];
int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset,
B_num_rows = block_data.matrix_dim.rows, B_num_cols =
block_data.matrix_dim.cols, B_row_stride =
block_data.matrix_dim.stride;
// Cast from void;
const Real *B_data = static_cast<Real*>(block_data.matrix_data);
// we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
for (int k = 0; k < B_num_cols; k++) {
const Real *this_B_col = B_data + k;
const Real *this_A_row = A_data + i * A_row_stride
+ B_row_start * A_col_stride;
// this_A_row points to the element A[i][B_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < B_num_rows; l++) // l indexes rows of B.
sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + B_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
// For a block matrix B, does B = alpha * C * D + beta * B.
// the (x,y,z) indices are the block index, then the row
// and column indices within the block. Note: transposition of C and D
// is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride),
// so it's invisible to this code. The num-cols and num-rows of C and D
// are only provided to the extent that they are not already determined
// by other quantities.
template<typename Real>
__global__
static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks,
const Real *C_data, int C_num_cols,
int C_row_stride, int C_col_stride,
const Real *D_data, int D_row_stride,
int D_col_stride, Real alpha, Real beta) {
int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B.
int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block
int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block
if (b >= num_blocks)
return;
const CuBlockMatrixData &block_data = B_cu_data[b];
if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols)
return; // we're outside the dimensions of the b'th block.
// B_elem is the element of B we're writing to.
Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data)
+ i * block_data.matrix_dim.stride + j;
Real B_val = *B_elem;
// B_row and B_col are the (row, col) index into the full matrix B.
int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j;
const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data
+ D_col_stride * B_col;
Real sum = 0.0;
for (int k = 0; k < C_num_cols; k++) {
sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride];
}
*B_elem = alpha * sum + beta * B_val;
}
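// Launch-configuration sketch for _block_add_mat_mat (illustrative; the
// block shape and the max_block_* bounds are assumptions). The x dimension
// walks the blocks of B while y/z cover the rows/columns of the largest
// block, so every element of every block gets exactly one thread:
//
//   dim3 block(1, 16, 16);
//   dim3 grid(num_blocks,
//             (max_block_rows + block.y - 1) / block.y,
//             (max_block_cols + block.z - 1) / block.z);
//   _block_add_mat_mat<<<grid, block>>>(
//       B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride,
//       C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta);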
template<typename Real>
__global__
static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim,
const Real *A_data, int A_num_rows,
int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha,
Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &cu_data = B_cu_data[j];
// BT means B transposed.
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset,
BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols =
cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride;
// Cast from void;
const Real *B_data = static_cast<Real*>(cu_data.matrix_data);
// we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
for (int k = 0; k < BT_num_cols; k++) {
const Real *this_BT_col = B_data + k * BT_col_stride;
const Real *this_A_row = A_data + i * A_row_stride
+ BT_row_start * A_col_stride;
// this_A_row points to the element A[i][BT_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
sum += this_BT_col[l] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + BT_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
template<typename Real>
__global__
static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data,
MatrixDim src_dim, const Int32Pair *indices) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= dim.rows || col >= dim.cols)
return;
int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride
+ indices[col].first, src_end_index = row * src_dim.stride
+ indices[col].second;
Real sum = 0.0;
for (int index = src_start_index; index < src_end_index; index++)
sum += src_data[index];
data[dst_index] = sum;
}
template<typename Real>
__global__
static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data,
MatrixDim src_dim, const Int32Pair *indexes) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= dim.rows || col >= dim.cols)
return;
int dst_index = row * dim.stride + col;
int src_index_start = indexes[row].first, src_index_end = indexes[row].second;
for (int row_index = src_index_start; row_index < src_index_end; row_index++)
data[dst_index] += src_data[row_index * src_dim.stride + col];
}
template<typename Real>
__global__
static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
// compute the function y[index] = log(1 + exp(x[index]))
if (i < d.cols && j < d.rows) {
Real val = x[src_index], result;
if (val >= 10.0)
result = val; // function approaches y=x as x gets large
else
result = log1p(exp(val));
y[dst_index] = result;
}
}
template<typename Real>
__global__
static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride,
int group_size, Real power) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols) {
int dst_index = i + j * d.stride;
Real tmp = 0;
int src_begin_index = i * group_size + j * src_stride;
int src_end_index = src_begin_index + group_size;
for (int src_index = src_begin_index; src_index < src_end_index;
src_index++) {
tmp += pow(std::abs(x[src_index]), power);
}
tmp = pow(tmp, Real(1.0 / power));
if (!isnan(tmp)) {
y[dst_index] = tmp;
} else {
Real max_value = x[src_begin_index], min_value = max_value;
for (int src_index = src_begin_index + 1; src_index < src_end_index;
src_index++) {
if (x[src_index] > max_value)
max_value = x[src_index];
if (x[src_index] < min_value)
min_value = x[src_index];
}
tmp = 0.0;
      // let max_abs_value be the largest abs(value)
Real max_abs_value = (max_value > -min_value ? max_value : -min_value);
if (max_abs_value == 0) {
y[dst_index] = 0.0;
} else {
for (int src_index = src_begin_index; src_index < src_end_index;
src_index++) {
Real x_scaled = x[src_index] / max_abs_value;
tmp += pow(std::abs(x_scaled), Real(power));
}
y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value;
}
}
}
}
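// Rescaled form used in the fallback above (algebraically identical to the
// direct pow()-sum; shown here for reference):
//   ||x||_p = m * ( sum_k |x_k / m|^p )^(1/p),   with m = max_k |x_k|.
// Every |x_k / m| lies in [0, 1], which keeps the intermediate sum finite
// when the direct computation would not be.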
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = 1.0 / (1.0 + exp(-x[src_index]));
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d,
int e_stride, int y_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows)
eout[dst_index] = y[y_index] * (1.0 - y[y_index]) * e[e_index];
}
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real exp_2x = exp(2.0 * x[src_index]);
Real res;
if (isinf(exp_2x)) {
res = 1.0;
} else {
res = (exp_2x - 1.0) / (exp_2x + 1.0);
}
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d,
int e_stride, int y_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows)
eout[dst_index] = (1.0 - y[y_index] * y[y_index]) * e[e_index];
}
template<typename Real>
__global__
static void _parametric_relu(Real* y, const Real* x, MatrixDim d, int src_stride,
const Real* a, const Real* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride,
src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = (x[src_index] > 0.0) ? a[i] * x[src_index] : b[i] * x[src_index];
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_parametric_relu(Real* eout, const Real* e, const Real* y,
MatrixDim d, int e_stride, int y_stride,
const Real* a, const Real* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows )
eout[dst_index] = (y[y_index] > 0.0 ? a[i] * e[e_index] : b[i] * e[e_index]);
}
template<typename Real>
__global__
static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = (x[src_index] > 0.0 ? 1.0 : 0.0);
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) {
__shared__ Real smem[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * src_stride;
const int y_start = i * d.stride;
const int tid = threadIdx.x;
// find max element of the row
// reduce to CU1DBLOCK elements per row.
Real tmax = sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tmax = fmax(tmax, x[x_start + j]);
}
smem[tid] = tmax;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
}
// broadcast max to all threads
__syncthreads();
Real max = smem[0];
// sum_j(exp(x(i,j)-max))
// reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tsum += exp(x[x_start + j] - max);
}
smem[tid] = tsum;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] += smem[tid + shift];
}
}
// broadcast sum to all threads
__syncthreads();
Real inv_sum = Real(1) / smem[0];
// normalize the row
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
y[y_start + j] = exp(x[x_start + j] - max) * inv_sum;
}
}
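// Launch sketch for _softmax_reduce (illustrative; 'y_data'/'x_data' are
// assumed names). One CU1DBLOCK-thread block handles one matrix row, so the
// grid is simply the number of rows:
//
//   _softmax_reduce<<<d.rows, CU1DBLOCK>>>(y_data, x_data, d, src_stride);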
// The output y_i = scale * x_i,
// and we want the RMS value of the y_i to equal target_rms,
// so y^t y = D * target_rms^2 (where y is one row of the output).
// We need to have scale = 1.0 / sqrt(x^t x / (D * target_rms^2)).
// there is also flooring involved, to avoid division-by-zero
// problems. It's important for the backprop, that the floor's
// square root is exactly representable as float.
// If add_log_stddev is true, log(max(epsi, sqrt(x^t x / D)))
// is an extra dimension of the output.
//
// 1D grid is used. Each 256-thread block works on 1 row of the data matrix.
// The block is also of 1D. Strided memory access is used if the length of the
// row is longer than 256.
template<typename Real>
__global__
static void _normalize_per_row(Real *y, int y_stride, const Real *x,
MatrixDim x_d, Real target_rms,
bool add_log_stddev) {
const int i = blockIdx.x;
const int tid = threadIdx.x;
const Real* x_row = x + i * x_d.stride;
__shared__ Real ssum[CU1DBLOCK];
// Reduce x_j^2 to CU1DBLOCK elements per row
Real tsum = Real(0);
for (int j = tid; j < x_d.cols; j += CU1DBLOCK) {
tsum += x_row[j] * x_row[j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Reduce last warp to 1 element per row.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66
if (tid == 0) {
ssum[0] = sqrt(
fmax(ssum[0] / (target_rms * target_rms * x_d.cols), kSquaredNormFloor));
}
// Broadcast floored stddev to all threads.
__syncthreads();
const Real stddev_div_target_rms = ssum[0];
const Real scale = Real(1) / stddev_div_target_rms;
// Store normalized input to output
Real* y_row = y + i * y_stride;
for (int j = tid; j < x_d.cols; j += CU1DBLOCK) {
y_row[j] = x_row[j] * scale;
}
if (tid == 0 && add_log_stddev) {
y_row[x_d.cols] = log(stddev_div_target_rms * target_rms);
}
}
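// Worked form of the scale computed above (for reference): with D = x_d.cols,
//   s = sqrt(max(x^t x / (D * target_rms^2), kSquaredNormFloor)),
// the kernel stores y = x / s and, when add_log_stddev is set, appends
// log(s * target_rms), i.e. log(max(floor, sqrt(x^t x / D))), as one extra
// output column.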
template<typename Real>
__global__
static void _diff_normalize_per_row(Real *id, int id_stride, const Real *iv,
MatrixDim iv_dim, const Real* od,
int od_stride, Real target_rms,
bool add_log_stddev) {
const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66
const Real kInvNormFloor = 8589934592.0;
const int tid = threadIdx.x;
const int i = blockIdx.x;
const Real* iv_row = iv + i * iv_dim.stride;
const Real* od_row = od + i * od_stride;
// reduce to CU1DBLOCK elements per row
Real dot_products = Real(0);
Real in_norm = Real(0);
for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) {
const Real iv_ij = iv_row[j];
dot_products += iv_ij * od_row[j];
in_norm += iv_ij * iv_ij;
}
__shared__ Real sprod[CU1DBLOCK];
__shared__ Real snorm[CU1DBLOCK];
sprod[tid] = dot_products;
snorm[tid] = in_norm;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sprod[tid] += sprod[tid + shift];
snorm[tid] += snorm[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
sprod[tid] += sprod[tid + shift];
snorm[tid] += snorm[tid + shift];
}
}
// broadcast the sum results
__syncthreads();
dot_products = sprod[0];
in_norm = snorm[0];
Real log_stddev_deriv;
if (add_log_stddev) {
log_stddev_deriv = Real(1) / max(in_norm, iv_dim.cols * kSquaredNormFloor)
* od_row[iv_dim.cols];
}
const Real inv_d_scaled = Real(1) / (iv_dim.cols * target_rms * target_rms);
in_norm = Real(1) / sqrt(max(in_norm * inv_d_scaled, kSquaredNormFloor));
const Real f = in_norm == kInvNormFloor ? Real(0) : in_norm;
dot_products *= f * f * f * inv_d_scaled;
for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) {
const Real iv_ij = iv_row[j];
Real id_ij = id[i * id_stride + j];
if (add_log_stddev) {
id_ij += log_stddev_deriv * iv_ij;
}
if (id != od) {
id_ij += in_norm * od_row[j];
} else {
id_ij *= in_norm;
}
id_ij -= dot_products * iv_ij;
id[i * id_stride + j] = id_ij;
}
}
// Per-row log-softmax operation on 'x', with writing to 'y'.
// note, x and y may point to the same memory. This is equivalent to setting
// matrix y to matrix x and then, for each row of y, subtracting the offset that
// will make exp(y.row[j]) sum to 1 for each row j.
//
// It expects to be called with CU1DBLOCK threads.
// The number of blocks [i.e. the gridDim] equals y_dim.rows,
// so one block of threads processes each row. x and y are
// expected to have the same dimension, but possibly different row strides.
template<typename Real>
__global__
static void _log_softmax_reduce(Real* y, const Real* x, MatrixDim y_dim,
int x_stride) {
__shared__ Real smem[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * x_stride;
const int y_start = i * y_dim.stride;
const int tid = threadIdx.x;
// find max element of the row
// reduce to CU1DBLOCK elements per row.
Real tmax = -1e20;
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
tmax = fmax(tmax, x[x_start + j]);
}
smem[tid] = tmax;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
}
// broadcast max to all threads
__syncthreads();
Real max = smem[0];
// sum_j(exp(x(i,j)-max))
// reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
tsum += exp(x[x_start + j] - max);
}
smem[tid] = tsum;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] += smem[tid + shift];
}
}
// broadcast sum to all threads
__syncthreads();
Real log_sum = log(smem[0]);
// normalize the row
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
y[y_start + j] = x[x_start + j] - max - log_sum;
}
}
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if (src_row < 0)
src_row = 0;
if (src_row >= d_in.rows)
src_row = d_in.rows - 1;
y[index] = x[src_col + src_row * d_in.stride];
}
}
template<typename Real>
__global__
static void _take_mean(const Real* x, Real* y, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index1 = i + j * d_in.stride;
int32_cuda index2 = j + i * d_in.stride;
if (i <= j && j < d_in.rows) {
int32_cuda index_sp = (j * (j + 1) / 2) + i;
y[index_sp] = 0.5 * (x[index1] + x[index2]);
}
}
template<typename Real>
__global__
static void _take_lower(const Real* x, Real* y, MatrixDim d_in) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index
if (j > i || i >= d_in.rows)
return;
int index = i * d_in.stride + j;
Real val = x[index];
int index_sp = (i * (i + 1) / 2) + j;
y[index_sp] = val;
}
template<typename Real>
__global__
static void _take_upper(const Real* x, Real* y, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index
if (j < i || j >= d_in.rows)
return;
int32_cuda index = i * d_in.stride + j;
int32_cuda index_sp = (j * (j + 1) / 2) + i;
y[index_sp] = x[index];
}
template<typename Real>
__global__
static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
y[i] = x[index];
}
}
template<typename Real>
__global__
static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dim.cols && j < dim.rows) {
int dst_index = i + j * dim.stride, src_index;
if (j <= i) { // no transpose
src_index = (i * (i + 1) / 2) + j;
} else { // transpose.
src_index = (j * (j + 1) / 2) + i;
}
y[dst_index] = x[src_index];
}
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_col = copy_from[i];
if (src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j * d_in.stride];
} else {
y[index] = 1.0 / 0.0;
}
}
}
template<typename Real>
__global__
static void _one(Real* x, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
x[i] = 1.0;
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row * d_in.stride];
}
}
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d,
int stride_grad) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride, grad_index = i + j * stride_grad;
if (i < d.cols && j < d.rows) {
if (wei[index] == 0.0)
return; //skip L1 if zero weight!
Real l1_signed = l1;
if (wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
//simulate update
Real after = wei[index] - lr * grad[grad_index] - l1_signed;
if ((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[grad_index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id,
MatrixDim d) {
const int32_cuda i = blockIdx.x;
const int32_cuda base = i * d.stride;
const int32_cuda tid = threadIdx.x;
__shared__ Real smax[CU1DBLOCK];
__shared__ int32_cuda sidx[CU1DBLOCK];
Real tmax = -1e20;
int32_cuda tidx = -1;
// Loop over blocks for coalesced memory access.
for (int32_cuda j = tid; j < d.cols; j += CU1DBLOCK) {
const Real val = mat[base + j];
if (val > tmax) {
tmax = val;
tidx = j;
}
}
smax[tid] = tmax;
sidx[tid] = tidx;
// Parallel reduce
#pragma unroll
for (int32_cuda num_working_threads = CU1DBLOCK / 2;
num_working_threads >= warpSize; num_working_threads >>= 1) {
__syncthreads();
if (tid < num_working_threads) {
if (smax[tid + num_working_threads] > smax[tid]) {
smax[tid] = smax[tid + num_working_threads];
sidx[tid] = sidx[tid + num_working_threads];
}
}
}
  // Warp reduce without __syncthreads()
  // (note: threads synchronize implicitly within a warp on the multiprocessor)
if (tid < warpSize / 2) {
#pragma unroll
for (int32_cuda num_working_threads = warpSize / 2; num_working_threads > 0;
num_working_threads >>= 1) {
if (smax[tid + num_working_threads] > smax[tid]) {
smax[tid] = smax[tid + num_working_threads];
sidx[tid] = sidx[tid + num_working_threads];
}
}
}
if (tid == 0) {
if (vec_val) {
vec_val[i] = smax[0];
}
vec_id[i] = sidx[0];
}
}
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out,
Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 0)
return;
if (j < d.rows) {
int32_cuda index = vec_tgt[j] + j * d.stride;
Real value = mat_net_out[index];
if (value < 1e-20)
value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
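// Worked example (illustrative): if row j of mat_net_out holds the softmax
// outputs (0.7, 0.2, 0.1) and vec_tgt[j] == 0, the kernel stores
//   vec_log_post[j] = log(0.7)
// and the row becomes (0.7 - 1, 0.2, 0.1) = (-0.3, 0.2, 0.1), i.e. softmax
// minus the one-hot target; only the target column is modified.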
template<typename Real>
__global__
static void _diff_softmax(Real* x, const MatrixDim dim, const Real* value,
const int value_stride, const Real* diff,
const int diff_stride) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int value_start = i * value_stride;
const int diff_start = i * diff_stride;
const int x_start = i * dim.stride;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < dim.cols; j += CU1DBLOCK) {
tsum += value[value_start + j] * diff[diff_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// Broadcast result to all threads
__syncthreads();
const Real pe = ssum[0];
// Apply element-wise x = value * (diff - pe)
for (int j = tid; j < dim.cols; j += CU1DBLOCK) {
x[x_start + j] = value[value_start + j] * (diff[diff_start + j] - pe);
}
}
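// Worked example (illustrative): with softmax outputs value = (0.7, 0.2, 0.1)
// and incoming derivative diff = (1, 0, 0), the per-row reduction gives
//   pe = sum_j value[j] * diff[j] = 0.7,
// so the backpropagated derivative is
//   x = value .* (diff - pe) = (0.21, -0.14, -0.07),
// which sums to zero, as expected for a softmax Jacobian-vector product.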
// Differentiate backward through the log softmax function.
// "out_value" is the log softmax output. For each row i it computes
//   in_deriv(i) = out_deriv(i) - sum(out_deriv(i)) .* exp(out_value(i)),
// where each (i) above denotes a row-vector.
// CUDA thread layout: 1 thread block (CU1DBLOCK == 256 threads) per matrix-row.
template<typename Real>
__global__
static void _diff_log_softmax(const MatrixDim in_deriv_dim,
const Real* out_value, const int out_value_stride,
const Real* out_deriv, const int out_deriv_stride,
Real* in_deriv) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int out_value_start = i * out_value_stride;
const int out_deriv_start = i * out_deriv_stride;
const int in_deriv_start = i * in_deriv_dim.stride;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) {
tsum += out_deriv[out_deriv_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// Broadcast result to all threads
__syncthreads();
const Real sum_e = ssum[0];
// Apply element-wise x = out_deriv - exp(value) * sum_e
for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) {
in_deriv[in_deriv_start + j] = out_deriv[out_deriv_start + j]
- exp(out_value[out_value_start + j]) * sum_e;
}
}
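// Worked example (illustrative): exp(out_value) is the softmax output, so with
// softmax = (0.7, 0.2, 0.1) and out_deriv = (1, 0, 0) the row sum is
//   sum_e = 1,
// and in_deriv = out_deriv - exp(out_value) * sum_e = (0.3, -0.2, -0.1),
// matching the usual log-softmax backprop rule.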
/**
This function computes the core part of the LSTM nonlinearity.
@param [in] in A matrix, of dimension num_rows by 5*cell_dim
(i.e. its num-cols must be a multiple of 5).
The column-space is interpreted as 5
consecutive blocks, each of dimension cell_dim,
which we name:
(i_part, f_part, c_part, o_part, c_{t-1}).
If 'have_dropout_mask' is nonzero, each row of
'in' will have 3 extra elements, interpreted
as dropout masks/scales for i_t, f_t and o_t.
@param [in] params A matrix, of dimension 3 by cell_dim,
with rows containing the 3 diagonal parameter matrices
used in LSTMs, namely
w_{ic}, w_{fc} and w_{oc}.
@param [out] out A matrix, of dimension num_rows by 2*cell_dim.
The quantities c_t and m_t respectively are put there
(in two blocks of column-dimension cell_dim),
according to the following equations:
i_t = Sigmoid(i_part + w_{ic}*c_{t-1})
f_t = Sigmoid(f_part + w_{fc}*c_{t-1})
c_t = f_t*c_{t-1} + i_t * Tanh(c_part)
o_t = Sigmoid(o_part + w_{oc}*c_t)
m_t = o_t * Tanh(c_t)
We use a 1D thread block with CU1DBLOCK threads.
It works best when cell_dim is a multiple of CU1DBLOCK.
We use a 1D grid; each block works on one row of the in and out matrices.
*/
template<typename Real>
__global__
static void _lstm_nonlinearity(const Real* in, const int in_stride,
const Real* params, const int params_stride,
const int out_stride, const int cell_dim,
const int have_dropout_mask, const int num_rows,
Real* out) {
const int tid = threadIdx.x;
const int i = blockIdx.x;
const Real* i_part = in + i * in_stride;
const Real* f_part = in + i * in_stride + cell_dim;
const Real* c_part = in + i * in_stride + cell_dim * 2;
const Real* o_part = in + i * in_stride + cell_dim * 3;
const Real* c_tm1 = in + i * in_stride + cell_dim * 4;
const Real* w_ic = params;
const Real* w_fc = params + params_stride;
const Real* w_oc = params + params_stride * 2;
Real* c_t = out + i * out_stride;
Real* m_t = out + i * out_stride + cell_dim;
Real i_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5] : 1),
f_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 1] : 1),
o_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 2] : 1);
for (int j = tid; j < cell_dim; j += CU1DBLOCK) {
Real c_tm1_j = c_tm1[j];
Real i_t_j = Real(1) / (Real(1) + exp(-i_part[j] - w_ic[j] * c_tm1_j));
Real f_t_j = Real(1) / (Real(1) + exp(-f_part[j] - w_fc[j] * c_tm1_j));
Real c_t_j = f_t_j * f_scale * c_tm1_j + i_t_j * i_scale * tanh(c_part[j]);
Real o_t_j = Real(1) / (Real(1) + exp(-o_part[j] - w_oc[j] * c_t_j));
c_t[j] = c_t_j;
m_t[j] = o_t_j * o_scale * tanh(c_t_j);
}
}
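// A minimal launch sketch consistent with the grid layout described above (the
// pointer and dimension names here are hypothetical placeholders; real callers
// go through the cudaF_/cudaD_ wrappers defined further below):
//
//   dim3 Gr(num_rows), Bl(CU1DBLOCK);
//   hipLaunchKernelGGL(( _lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0,
//                      in_data, in_stride, params_data, params_stride,
//                      out_stride, cell_dim, have_dropout_mask, num_rows,
//                      out_data);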
/**
This function does the 'backward' pass corresponding to the function
ComputeLstmNonlinearity. It's a little more complicated than you might
expect because of the 'self-repair' mechanism that we use to prevent the
sigmoid and tanh nonlinearities oversaturating, and because of the
average-activation and average-derivative stats that we store for these
nonlinearities (these stats are used both to control the self-repair
mechanism, and for diagnostic purposes).
Because the forward pass computes various intermediate values that are not
output, this function actually has to do the same computations as the
forward pass before it actually does the backprop.
In the following description, `C` is for `cell_dim`, `N` is for `num_rows`.
@param [in] input The same as in ComputeLstmNonlinearity().
A matrix, of dimension N by 5C (i.e. its num-cols must be
a multiple of 5). The column-space is interpreted as 5
consecutive blocks, each of dimension C, which we name:
(i_part, f_part, c_part, o_part, c_{t-1}).
If 'have_dropout_mask' is nonzero, each row of
'in' will have 3 extra elements, interpreted
as dropout masks/scales for i_t, f_t and o_t.
@param [in] params The same as in ComputeLstmNonlinearity().
A matrix, of dimension 3 by C, with rows containing the
three diagonal parameter matrices used in LSTMs, namely
w_{ic}, w_{fc} and w_{oc}.
@param [in] output_deriv
A matrix, of dimension N by 2C, containing the derivative
of the objective function we're backpropagating,
w.r.t. the quantities c_t and m_t (in two blocks of
column-dimension C).
@param [in] deriv_sum_in
This is used in the self-repair code to identify
oversaturated nonlinearities.
It is a matrix, of dimension 5 by C, corresponding to
the totals of the derivatives of the 5 sigmoid and tanh
nonlinearities, in the order they appear in the equations
in the documentation of ComputeLstmNonlinearity(),
i.e. the equations for (i_t, f_t, c_t, o_t, m_t) respectively.
This will be divided by 'count_in' to get the average
derivative value so far, for each of the nonlinearities.
@param [in] self_repair_config
A vector of dimension 10, containing the configuration of
the self-repair to be used for the 5 nonlinearities.
The first 5 elements are the self_repair_lower_threshold
values (typically 0.05 for sigmoid and 0.2 for tanh),
and the next 5 elements are the corresponding
self-repair-scales (typically 10^-5).
@param [in] count_in The data-count that corresponds to the stats in
'deriv_sum_in' at entry to the function.
This function should tolerate the count being zero
(in that case, it is free to do the self-repair or not,
as this should only happen on the 1st minibatch of each
training job).
@param [out] input_deriv
May be NULL; if not, this function writes, to this
location, the backpropagated derivative of the objective
function w.r.t. the 'input' matrix. This matrix should
have the same dimension as 'input' i.e. N by 5C. In
addition to the regular backpropagated derivative, the
output will include small values relating to 'self-repair'.
@param [out] params_deriv
May be NULL; if not, this is where this function *writes*
[not adds] the backpropagated derivative of the objective
function w.r.t. 'params'; it should have the same dimension
as 'params' (3 by C). (This matrix will then be processed
by the natural gradient code and added to the appropriate
copy of the parameter matrix, outside this function).
@param [out] value_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C. This function *adds* to this location
the total value of each of the sigmoid/tanh nonlinearities
that it computes (this is for diagnostic purposes).
@param [out] deriv_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C; this function *adds* to this location the
total of the derivative of each of the sigmoid/tanh
nonlinearities that it computes (this is for diagnostic
purposes and to control the self-repair). This function
should tolerate the case when 'deriv_sum_out' points to the
same data as 'deriv_sum_in'.
@param [out] self_repair_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C; this function *writes* to this location
the sum of the number of times the self-repair code was
activated (integer values 0 <= k <= N). This will be
processed outside this function into self-repair stats for
diagnostics.
// Use a 2D block (8x32 threads) since we need to compute column sums.
// Use a 1D grid to cover the column dimension `cell_dim` of the data matrix.
*/
template<typename Real>
__global__
static void _diff_lstm_nonlinearity(const int cell_dim, const int have_dropout_mask,
const int num_rows,
const Real* input, const int input_stride,
const Real* params, const int params_stride,
const Real* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const Real* self_repair_config,
double count, Real* input_deriv,
const int input_deriv_stride,
Real* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
Real* self_repair_sum_out,
const int self_repair_sum_out_stride) {
__shared__ Real smem[CU1DBLOCK];
const int j = blockIdx.x * blockDim.x + threadIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int grid_stride = gridDim.y * blockDim.y;
const int i0 = blockIdx.y * blockDim.y + threadIdx.y;
Real w_ic_deriv_sum = 0;
Real w_fc_deriv_sum = 0;
Real w_oc_deriv_sum = 0;
Real i_t_value_sum = 0, i_t_deriv_sum = 0;
Real f_t_value_sum = 0, f_t_deriv_sum = 0;
Real c_part_value_sum = 0, c_part_deriv_sum = 0;
Real o_t_value_sum = 0, o_t_deriv_sum = 0;
Real c_t_value_sum = 0, c_t_deriv_sum = 0;
bool update_sr[5];
if (j < cell_dim) {
const Real w_ic = params[j];
const Real w_fc = params[params_stride + j];
const Real w_oc = params[2 * params_stride + j];
const Real* sr_config = self_repair_config;
# pragma unroll
for (int i = 0; i < 5; i++) {
update_sr[i] =
deriv_sum_in[i * deriv_sum_in_stride + j] < sr_config[i] * count;
}
const Real i_t_self_repair = (update_sr[0] ? sr_config[5] : 0);
const Real f_t_self_repair = (update_sr[1] ? sr_config[6] : 0);
const Real c_part_self_repair = (update_sr[2] ? sr_config[7] : 0);
const Real o_t_self_repair = (update_sr[3] ? sr_config[8] : 0);
const Real c_t_self_repair = (update_sr[4] ? sr_config[9] : 0);
for (int i = i0; i < num_rows; i += grid_stride) {
const Real i_part = input[i * input_stride + j];
const Real f_part = input[i * input_stride + j + cell_dim];
const Real c_part = input[i * input_stride + j + 2 * cell_dim];
const Real o_part = input[i * input_stride + j + 3 * cell_dim];
const Real c_prev = input[i * input_stride + j + 4 * cell_dim];
const Real i_scale = (have_dropout_mask ?
input[i * input_stride + cell_dim * 5] : 1),
f_scale = (have_dropout_mask ?
input[i * input_stride + cell_dim * 5 + 1] :1),
o_scale = (have_dropout_mask ?
input[i * input_stride + cell_dim * 5 + 2] :1);
const Real i_t = Real(1) / (1 + exp(-i_part - w_ic * c_prev));
const Real f_t = Real(1) / (1 + exp(-f_part - w_fc * c_prev));
const Real tanh_c_part = tanh(c_part);
const Real c_t = f_t * f_scale * c_prev + i_t * i_scale * tanh_c_part;
const Real o_t = 1 / (1 + exp(-o_part - w_oc * c_t));
const Real tanh_c_t = tanh(c_t);
const Real i_t_deriv = i_t * (1 - i_t);
const Real f_t_deriv = f_t * (1 - f_t);
const Real c_part_deriv = 1 - tanh_c_part * tanh_c_part;
const Real o_t_deriv = o_t * (1 - o_t);
const Real c_t_deriv = 1 - tanh_c_t * tanh_c_t;
if (params_deriv) {
i_t_value_sum += i_t;
f_t_value_sum += f_t;
c_part_value_sum += tanh_c_part;
o_t_value_sum += o_t;
c_t_value_sum += tanh_c_t;
i_t_deriv_sum += i_t_deriv;
f_t_deriv_sum += f_t_deriv;
c_part_deriv_sum += c_part_deriv;
o_t_deriv_sum += o_t_deriv;
c_t_deriv_sum += c_t_deriv;
}
const Real dc_t_out = output_deriv[i * output_deriv_stride + j];
const Real dm_t = output_deriv[i * output_deriv_stride + j + cell_dim];
const Real dtanh_c_t = o_t * o_scale * dm_t;
const Real do_t = o_scale * tanh_c_t * dm_t;
const Real do_t_input = (o_t_deriv * do_t
- (2 * o_t - 1) * o_t_self_repair);
const Real dc_t = (c_t_deriv * dtanh_c_t + dc_t_out + do_t_input * w_oc)
- tanh_c_t * c_t_self_repair;
const Real dtanh_c_part = i_t * i_scale * dc_t;
const Real df_t = dc_t * f_scale * c_prev;
const Real df_t_input = (df_t * f_t_deriv
- (2 * f_t - 1) * f_t_self_repair);
const Real di_t = dc_t * i_scale * tanh_c_part;
const Real di_t_input = (di_t * i_t_deriv
- (2 * i_t - 1) * i_t_self_repair);
if (params_deriv) {
w_ic_deriv_sum += c_prev * di_t_input;
w_fc_deriv_sum += c_prev * df_t_input;
w_oc_deriv_sum += c_t * do_t_input;
}
const Real dc_prev = w_ic * di_t_input + w_fc * df_t_input + f_t * f_scale * dc_t;
const Real do_part = do_t_input;
const Real dc_part = (c_part_deriv * dtanh_c_part
- tanh_c_part * c_part_self_repair);
const Real df_part = df_t_input;
const Real di_part = di_t_input;
if (input_deriv) {
input_deriv[i * input_deriv_stride + j] = di_part;
input_deriv[i * input_deriv_stride + j + cell_dim] = df_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 2] = dc_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 3] = do_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 4] = dc_prev;
}
}
}
if (params_deriv) {
// compute params_deriv
smem[tid] = w_ic_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[j] = smem[tid];
}
__syncthreads();
smem[tid] = w_fc_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[params_deriv_stride + j] = smem[tid];
}
__syncthreads();
smem[tid] = w_oc_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[2 * params_deriv_stride + j] = smem[tid];
}
// compute value_sum_out
__syncthreads();
smem[tid] = i_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[j] += smem[tid];
}
__syncthreads();
smem[tid] = f_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_part_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[2 * value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = o_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[3 * value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[4 * value_sum_out_stride + j] += smem[tid];
}
// need to update self_repair_sum_out before deriv_sum_out, because
// deriv_sum_out and deriv_sum_in might point to the same memory.
if (i0 < 5 && j < cell_dim) {
self_repair_sum_out[i0 * self_repair_sum_out_stride + j] =
update_sr[i0] ? num_rows : 0;
}
// compute deriv_sum_out
__syncthreads();
smem[tid] = i_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[j] += smem[tid];
}
__syncthreads();
smem[tid] = f_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_part_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[2 * deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = o_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[3 * deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_t_deriv_sum;
__syncthreads();
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[4 * deriv_sum_out_stride + j] += smem[tid];
}
}
}
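// One plausible launch geometry for the kernel above, given as a sketch only
// (the real geometry is chosen by the caller of the cudaF_/cudaD_ wrappers,
// assuming CU1DBLOCK == 256):
//
//   dim3 Bl(32, 8);                             // 32x8 = 256 threads
//   dim3 Gr((cell_dim + Bl.x - 1) / Bl.x, 1);   // 1D grid covering cell_dim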
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
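/*
 * A minimal sketch of how a caller might pick the launch geometry for the
 * 2D element-wise wrappers below (CU2DBLOCK and the exact block size are
 * caller-side assumptions, not defined in this file):
 *
 *   dim3 Bl(CU2DBLOCK, CU2DBLOCK);              // e.g. 16x16 threads
 *   dim3 Gr((d.cols + Bl.x - 1) / Bl.x,
 *           (d.rows + Bl.y - 1) / Bl.y);
 *   cudaF_set_const(Gr, Bl, data, 0.0f, d);
 */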
/*
* "int32"
*/
void cuda_int32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value,
MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cuda_int32_add(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value,
MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cuda_int32_sequence(dim3 Gr, dim3 Bl, int32_cuda* data, int length,
int32_cuda base) {
hipLaunchKernelGGL(( _sequence), dim3(Gr), dim3(Bl), 0, 0, data, length, base);
}
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {
hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA);}
void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {
hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA);}
void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat,
MatrixDim mat_dim, const float *vec,
const float *mat2, int mat2_row_stride,
int mat2_col_stride, float beta) {
hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d);
}
void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power,
bool include_sign, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d);
}
void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst,
const float* const * src, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim);
}
void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const * dst,
const float* src, MatrixDim src_dim) {
hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim);
}
void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst,
const float* const * src, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim);
}
void cudaF_add_to_rows(dim3 Gr, dim3 Bl, float alpha,
float* dst, const float* src, const MatrixIndexT_cuda* reorder,
MatrixDim src_dim, int dst_stride) {
hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, src_dim, dst_stride);
}
void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const * dst,
const float* src, MatrixDim src_dim) {
hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val,
MatrixDim d) {
hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val,
MatrixDim d) {
hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d);
}
void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A,
MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A,
MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d,
int src_stride) {
hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaF_min(dim3 Gr, dim3 Bl, float* mat, const float* other,
MatrixDim mat_d, int other_stride) {
hipLaunchKernelGGL(( _min), dim3(Gr),dim3(Bl), 0, 0, mat,other,mat_d,other_stride);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale,
MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale,
MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x,
MatrixDim d, int src_stride, int group_size) {
hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size);
}
void cudaF_diff_group_pnorm(dim3 Gr, dim3 Bl, float *id, const float *iv,
const float *ov, const float* od, MatrixDim id_dim,
int iv_stride, int ov_stride, int od_stride,
int group_size, float power) {
hipLaunchKernelGGL(( _diff_group_pnorm), dim3(Gr), dim3(Bl), 0, 0, id, iv, ov, od, id_dim, iv_stride, ov_stride,
od_stride, group_size, power);
}
void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1,
const float *x2, MatrixDim y_dim, int x1_stride,
int x2_stride, int group_size) {
hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, y_dim, x1_stride, x2_stride,
group_size);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div,
MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst,
MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
} else {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
}
}
void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src,
int32_cuda num_row_blocks, int32_cuda num_col_blocks,
float* dst, MatrixDim d, int src_stride,
int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks,
dst, d, src_stride);
} else {
hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst,
d, src_stride);
}
}
void cudaF_add_mat_repeated(dim3 Gr, dim3 Bl, float alpha, const float* src,
MatrixDim src_dim, float *dst, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _add_mat_repeated), dim3(Gr),dim3(Bl), 0, 0, alpha, src, src_dim, dst, dst_dim);
}
void cudaF_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B,
const float *C, float *dst, MatrixDim d,
int stride_a, int stride_b, int stride_c) {
hipLaunchKernelGGL(( _set_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d, stride_a, stride_b, stride_c);
}
void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T,
MatrixDim tdim, float *S, MatrixDim sdim) {
hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col,
float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row,
float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat,
MatrixDim mat_dim, const float *mat2,
int mat2_row_stride, int mat2_col_stride,
const float *vec, float beta) {
hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data,
const float *srcA_data, const float *srcB_data,
MatrixDim dim, int srcA_stride, int srcB_stride,
float alpha, float beta) {
hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim,
srcA_stride, srcB_stride, alpha, beta);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask,
MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaF_max_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<MAX,float>());
}
void cudaF_min_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<MIN,float>());
}
void cudaF_sum_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<SUM,float>());
}
void cudaF_add_col_sum_mat(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d, const float alpha,
const float beta) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr), dim3(Bl), 0, 0, result, mat, d,
TransReduceOp<SUMAB, float>(alpha, beta));
}
void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig,
float changed) {
hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed);
}
void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a,
float param_1, float param_2, float param_3,
int* flag, int dim) {
hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim);
}
void cublas_copy_kaldi_fd(int Gr, int Bl, int n, const float* x, int incx,
double* y, int incy) {
hipLaunchKernelGGL(( _cublas_copy_kaldi), dim3(Gr),dim3(Bl), 0, 0, n, x, incx, y, incy);
}
void cublas_copy_kaldi_df(int Gr, int Bl, int n, const double* x, int incx,
float* y, int incy) {
hipLaunchKernelGGL(( _cublas_copy_kaldi), dim3(Gr),dim3(Bl), 0, 0, n, x, incx, y, incy);
}
void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) {
hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim);
}
void cudaF_vec_min(int Gr, int Bl, const float* v, float* value, int dim,
int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<MIN, float>());
}
void cudaF_vec_max(int Gr, int Bl, const float* v, float* value, int dim,
int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<MAX, float>());
}
void cudaF_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const float* A, const float* B,
MatrixDim dA, int B_stride, float* value) {
hipLaunchKernelGGL(( _trace_mat_mat_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value);
}
void cudaF_trace_mat_mat(dim3 Gr, dim3 Bl, const float* A, const float* B,
MatrixDim dA, int B_stride, float* value) {
hipLaunchKernelGGL(( _trace_mat_mat<32>) , dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value);
}
void cudaF_add_diag_mat_mat_MNT(int Gr, int Bl, const float alpha,
const float* M, const MatrixDim dim_M,
const float* N, const int stride_N,
const float beta, float* v) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MNT), dim3(Gr),dim3(Bl), 0, 0, alpha,M,dim_M,N,stride_N,beta,v);
}
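// Note: the two wrappers below dispatch on the block width Bl.x; any value
// other than 16 or 32 launches no kernel at all, so callers must use one of
// those widths.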
void cudaF_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const float alpha,
const float* M, const int stride_M,
const float* N, const MatrixDim dim_N,
const float beta, float* v) {
if (Bl.x == 16) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<16>) , dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
} else if (Bl.x==32) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaF_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const float alpha,
const float* M, const int stride_M,
const float* N, const MatrixDim dim_N,
const float beta, float* v) {
if (Bl.x == 16) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MN<16>) , dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
} else if (Bl.x==32) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x,
const float* y, float beta, int dim) {
hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim);
}
void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<SUM, float>());
}
void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
float alpha, MatrixElement<float>* x,
int num_elements) {
hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements);
}
void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim,
float alpha, const Int32Pair* indices,
const float* x, int s, float* data) {
hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data);
}
void cudaF_matrix_add_to_elements(dim3 Gr, dim3 Bl, float alpha,
float* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
hipLaunchKernelGGL(( _cuda_matrix_add_to_elements), dim3(Gr), dim3(Bl), 0, 0, alpha, mat, dim, elements);
}
void cudaF_vector_copy_elements(dim3 Gr, dim3 Bl, float *data, int dim,
const float *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
hipLaunchKernelGGL(( _cuda_vector_copy_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, src_mat, mat_stride,
transpose, elements);
}
void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s,
const float* z, MatrixDim d, float* z2, MatrixDim d2,
float* t) {
hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t);
}
void cudaD_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<double>* x, int s,
const double* z, MatrixDim d, double* z2,
MatrixDim d2, double* t) {
hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t);
}
void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst,
const float *src, int dim) {
hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim);
}
void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val,
float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim);
}
void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val,
float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v, ceiling_val,count,dim);
}
void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) {
hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim);
}
void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) {
hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d,
const float *Adata, int A_num_rows, int A_num_cols,
int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, float alpha, float beta,
int B_trans) {
if (B_trans) {
hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
} else {
hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
}
}
void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data,
int num_blocks, const float *C_data,
int C_num_cols, int C_row_stride, int C_col_stride,
const float *D_data, int D_row_stride,
int D_col_stride, float alpha, float beta) {
hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha,
beta);
}
/*
* cu::
*/
void cudaF_soft_hinge(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d,
int src_stride, int group_size, float power) {
hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power);
}
void cudaF_group_spec_pnorm(dim3 Gr, dim3 Bl, float* y, const float* x,
MatrixDim d, int src_stride, int group_size,
float power) {
if (power == float(0)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L0NORM, float>());
} else if (power == float(1)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L1NORM, float>());
} else if (power == float(2)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L2NORM, float>());
} else if (power == std::numeric_limits<float>::infinity()) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<LINFNORM, float>());
} else {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<LPNORM, float>(power));
}
}
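// (The dispatch above picks a specialized reduction for the common p-norm
// cases (L0, L1, L2 and L-infinity) and falls back to the generic
// TransReduceOp<LPNORM, ...> for any other finite power.)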
void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d,
int src_stride, int group_size) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<MAX, float>());
}
void cudaF_sigmoid(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_sigmoid(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride,
int y_stride) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaF_tanh(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_tanh(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride, int y_stride) {
hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaF_parametric_relu(dim3 Gr, dim3 Bl, float* y, const float* x,
MatrixDim d, int src_stride,
const float* a, const float* b) {
hipLaunchKernelGGL(( _parametric_relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, a, b);
}
void cudaF_diff_parametric_relu(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride,
int y_stride, const float* a, const float* b) {
hipLaunchKernelGGL(( _diff_parametric_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride, a, b);
}
void cudaF_heaviside(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x,
MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_log_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x,
MatrixDim y_dim, int x_stride) {
hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, y_dim, x_stride);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaF_normalize_per_row(size_t Gr, size_t Bl, float *y, int y_stride,
const float *x, MatrixDim x_d, float target_rms,
bool add_log_stddev) {
hipLaunchKernelGGL(( _normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, y, y_stride, x, x_d, target_rms, add_log_stddev);
}
void cudaF_one(int Gr, int Bl, float* x, int dim) {
hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim);
}
void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim dim) {
hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x, y, dim);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* copy_from, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1,
float lr, MatrixDim d, int stride_grad) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val,
int32_cuda* vec_id, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt,
float* mat_net_out, float* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaF_diff_softmax(dim3 Gr, dim3 Bl, float* x, const MatrixDim dim,
const float* value, const int value_stride,
const float* diff, const int diff_stride) {
hipLaunchKernelGGL(( _diff_softmax), dim3(Gr), dim3(Bl), 0, 0, x, dim, value, value_stride, diff, diff_stride);
}
void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out,
const float *v_in) {
hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaF_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim,
const float* out_value, const int out_value_stride,
const float* out_deriv, const int out_deriv_stride,
float* in_deriv) {
hipLaunchKernelGGL(( _diff_log_softmax), dim3(Gr), dim3(Bl), 0, 0, in_deriv_dim, out_value, out_value_stride,
out_deriv, out_deriv_stride, in_deriv);
}
void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col,
const float* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col,
const float* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
const float *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices);
}
void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
const float *src_data, MatrixDim src_dim,
const Int32Pair *indexes) {
hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes);
}
void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
float *output) {
hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output);
}
void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1,
const float *mat2, float *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride,
mask_stride);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {
hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA);}
void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {
hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA);}
void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat,
MatrixDim mat_dim, const double *vec,
const double *mat2, int mat2_row_stride,
int mat2_col_stride, double beta) {
hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d);
}
void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power,
bool include_sign, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d);
}
void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst,
const double* const * src, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim);
}
void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const * dst,
const double* src, MatrixDim src_dim) {
hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim);
}
void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst,
const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim dst_dim, int src_stride) {
hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst,
const double* const * src, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim);
}
void cudaD_add_to_rows(dim3 Gr, dim3 Bl, double alpha,
double* dst, const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim src_dim, int dst_stride) {
hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, src_dim, dst_stride);
}
void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha,
double* const * dst, const double* src,
MatrixDim src_dim) {
hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val,
MatrixDim d) {
hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val,
MatrixDim d) {
hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d);
}
void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value,
int dim) {
hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A,
MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A,
MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d,
int src_stride) {
hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_min(dim3 Gr, dim3 Bl, double* mat, const double* other, MatrixDim mat_d,
int other_stride) {
hipLaunchKernelGGL(( _min), dim3(Gr),dim3(Bl), 0, 0, mat,other,mat_d,other_stride);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale,
MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale,
MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size) {
hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size);
}
void cudaD_diff_group_pnorm(dim3 Gr, dim3 Bl, double *id, const double *iv,
const double *ov, const double* od,
MatrixDim id_dim, int iv_stride, int ov_stride,
int od_stride, int group_size, double power) {
hipLaunchKernelGGL(( _diff_group_pnorm), dim3(Gr), dim3(Bl), 0, 0, id, iv, ov, od, id_dim, iv_stride, ov_stride,
od_stride, group_size, power);
}
void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1,
const double* x2, MatrixDim y_dim,
int x1_stride, int x2_stride, int group_size) {
hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, y_dim, x1_stride, x2_stride,
group_size);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div,
MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src,
double* dst, MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
} else {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
}
}
void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src,
int32_cuda num_row_blocks, int32_cuda num_col_blocks,
double* dst, MatrixDim d, int src_stride,
int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks,
dst, d, src_stride);
} else {
hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst,
d, src_stride);
}
}
void cudaD_add_mat_repeated(dim3 Gr, dim3 Bl, double alpha, const double* src,
MatrixDim src_dim, double *dst, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _add_mat_repeated), dim3(Gr),dim3(Bl), 0, 0, alpha, src, src_dim, dst, dst_dim);
}
void cudaD_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A,
const double *B, const double *C, double *dst,
MatrixDim d, int stride_a, int stride_b,
int stride_c) {
hipLaunchKernelGGL(( _set_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d,stride_a,stride_b,stride_c);
}
void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta,
const double* T, MatrixDim tdim, double *S,
MatrixDim sdim) {
hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col,
double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row,
double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat,
MatrixDim mat_dim, const double *mat2,
int mat2_row_stride, int mat2_col_stride,
const double *vec, double beta) {
hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data,
const double *srcA_data,
const double *srcB_data, MatrixDim dim,
int srcA_stride, int srcB_stride, double alpha,
double beta) {
hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim,
srcA_stride, srcB_stride, alpha, beta);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask,
MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaD_max_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<MAX,double>());
}
void cudaD_min_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<MIN,double>());
}
void cudaD_sum_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<SUM,double>());
}
void cudaD_add_col_sum_mat(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d, const double alpha,
const double beta) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr), dim3(Bl), 0, 0, result, mat, d,
TransReduceOp<SUMAB, double>(alpha, beta));
}
void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig,
double changed) {
hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed);
}
void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a,
double param_1, double param_2, double param_3,
int* flag, int dim) {
hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim);
}
void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a,
int dim) {
hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim);
}
void cudaD_vec_min(int Gr, int Bl, const double* v, double* value, int dim,
int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<MIN, double>());
}
void cudaD_vec_max(int Gr, int Bl, const double* v, double* value, int dim,
int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<MAX, double>());
}
void cudaD_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const double* A,
const double* B, MatrixDim dA, int B_stride,
double* value) {
hipLaunchKernelGGL(( _trace_mat_mat_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value);
}
void cudaD_trace_mat_mat(dim3 Gr, dim3 Bl, const double* A, const double* B,
MatrixDim dA, int B_stride, double* value) {
hipLaunchKernelGGL(( _trace_mat_mat<32>) , dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value);
}
void cudaD_add_diag_mat_mat_MNT(int Gr, int Bl, const double alpha,
const double* M, const MatrixDim dim_M,
const double* N, const int stride_N,
const double beta, double* v) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MNT), dim3(Gr),dim3(Bl), 0, 0, alpha,M,dim_M,N,stride_N,beta,v);
}
void cudaD_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const double alpha,
const double* M, const int stride_M,
const double* N, const MatrixDim dim_N,
const double beta, double* v) {
if (Bl.x == 16) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<16>) , dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
} else if (Bl.x==32) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
}
}
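// Note: only tile widths of 16 and 32 are instantiated here, so any other
// Bl.x silently launches no kernel at all; the same applies to
// cudaD_add_diag_mat_mat_MN below.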
void cudaD_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const double alpha,
const double* M, const int stride_M,
const double* N, const MatrixDim dim_N,
const double beta, double* v) {
if (Bl.x == 16) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MN<16>) , dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
} else if (Bl.x==32) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x,
const double* y, double beta, int dim) {
hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim);
}
void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col,
const double* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col,
const double* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc,
TransReduceOp<SUM, double>());
}
void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
double alpha, MatrixElement<double>* x,
int num_elements) {
hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements);
}
void cudaD_vector_copy_elements(dim3 Gr, dim3 Bl, double *data, int dim,
const double *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
hipLaunchKernelGGL(( _cuda_vector_copy_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, src_mat, mat_stride,
transpose, elements);
}
void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim,
double alpha, const Int32Pair* indices,
const double* x, int s, double* data) {
hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data);
}
void cudaD_matrix_add_to_elements(dim3 Gr, dim3 Bl, double alpha,
double* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
hipLaunchKernelGGL(( _cuda_matrix_add_to_elements), dim3(Gr), dim3(Bl), 0, 0, alpha, mat, dim, elements);
}
void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst,
const double *src, int dim) {
hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim);
}
void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val,
float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim);
}
void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val,
float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v,ceiling_val,count,dim);
}
void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) {
hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim);
}
void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) {
hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d,
const double *Adata, int A_num_rows, int A_num_cols,
int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, double alpha, double beta,
int B_trans) {
if (B_trans) {
hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
} else {
hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
}
}
void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data,
int num_blocks, const double *C_data,
int C_num_cols, int C_row_stride, int C_col_stride,
const double *D_data, int D_row_stride,
int D_col_stride, double alpha, double beta) {
hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride,
alpha, beta);
}
/*
* cu::
*/
void cudaD_soft_hinge(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size,
double power) {
hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power);
}
void cudaD_group_spec_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size,
double power) {
if (power == double(0)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L0NORM, double>());
} else if (power == double(1)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L1NORM, double>());
} else if (power == double(2)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L2NORM, double>());
} else if (power == std::numeric_limits<double>::infinity()) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<LINFNORM, double>());
} else {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<LPNORM, double>(power));
}
}
void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride, int group_size) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<MAX, double>());
}
void cudaD_sigmoid(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
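// Illustrative only (the launch configuration is chosen by the calling code,
// not in this file): a typical 2D element-wise launch would look roughly like
//
//   dim3 Bl(16, 16);                                  // assumed block shape
//   dim3 Gr((d.cols + Bl.x - 1) / Bl.x,
//           (d.rows + Bl.y - 1) / Bl.y);              // cover every element
//   cudaD_sigmoid(Gr, Bl, y, x, d, src_stride);
//
// This is a sketch under assumed block dimensions, not the caller's actual code.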
void cudaD_diff_sigmoid(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride,
int y_stride) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaD_tanh(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_diff_tanh(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride, int y_stride) {
hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaD_parametric_relu(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride,
const double* a, const double* b) {
hipLaunchKernelGGL(( _parametric_relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, a, b);
}
void cudaD_diff_parametric_relu(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride,
int y_stride, const double* a, const double* b) {
hipLaunchKernelGGL(( _diff_parametric_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride, a, b);
}
void cudaD_heaviside(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x,
MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_log_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x,
MatrixDim y_dim, int x_stride) {
hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, y_dim, x_stride);
}
void cudaD_normalize_per_row(size_t Gr, size_t Bl, double *y, int y_stride,
const double *x, MatrixDim x_d, double target_rms,
bool add_log_stddev) {
hipLaunchKernelGGL(( _normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, y, y_stride, x, x_d, target_rms, add_log_stddev);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaD_one(int Gr, int Bl, double* x, int dim) {
hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim);
}
void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_out) {
hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x,y,d_out);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* copy_from, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1,
double lr, MatrixDim d, int stride_grad) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val,
int32_cuda* vec_id, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt,
double* mat_net_out, double* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaD_diff_softmax(dim3 Gr, dim3 Bl, double* x, const MatrixDim dim,
const double* value, const int value_stride,
const double* diff, const int diff_stride) {
hipLaunchKernelGGL(( _diff_softmax), dim3(Gr), dim3(Bl), 0, 0, x, dim, value, value_stride, diff, diff_stride);
}
void cudaD_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim,
const double* out_value, const int out_value_stride,
const double* out_deriv, const int out_deriv_stride,
double* in_deriv) {
hipLaunchKernelGGL(( _diff_log_softmax), dim3(Gr), dim3(Bl), 0, 0, in_deriv_dim, out_value, out_value_stride,
out_deriv, out_deriv_stride, in_deriv);
}
void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out,
MatrixDim d_out, const double *v_in) {
hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
const double *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices);
}
void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
const double *src_data, MatrixDim src_dim,
const Int32Pair *indexes) {
hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes);
}
void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
double *output) {
hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output);
}
void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1,
const double *mat2, double *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride,
mask_stride);
}
// Some conversion kernels for which it's more convenient
// to not name them F or D.
void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const double* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const double* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const float* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const double* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const float* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const double* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val, float* trace_vec) {
hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val,
float* trace_vec) {
hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val,
double* trace_vec) {
hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val,
double* trace_vec) {
hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_lstm_nonlinearity(dim3 Gr, dim3 Bl, const double* in,
const int in_stride, const double* params,
const int params_stride, const int out_stride,
const int cell_dim, const int have_dropout_mask,
const int num_rows, double* out) {
hipLaunchKernelGGL(( _lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0,
in, in_stride, params, params_stride,
out_stride, cell_dim, have_dropout_mask, num_rows, out);
}
void cudaF_lstm_nonlinearity(dim3 Gr, dim3 Bl, const float* in,
const int in_stride, const float* params,
const int params_stride, const int out_stride,
const int cell_dim, const int have_dropout_mask,
const int num_rows, float* out) {
hipLaunchKernelGGL(( _lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0,
in, in_stride, params, params_stride,
out_stride, cell_dim, have_dropout_mask, num_rows, out);
}
void cudaD_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim,
const int have_dropout_mask,
const int num_rows, const double* input,
const int input_stride, const double* params,
const int params_stride,
const double* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const double* self_repair_config,
double count, double* input_deriv,
const int input_deriv_stride,
double* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
double* self_repair_sum_out,
const int self_repair_sum_out_stride) {
hipLaunchKernelGGL(( _diff_lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0,
cell_dim, have_dropout_mask, num_rows, input,
input_stride, params, params_stride, output_deriv, output_deriv_stride,
deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv,
input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out,
value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride,
self_repair_sum_out, self_repair_sum_out_stride);
}
void cudaF_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim,
const int have_dropout_mask,
const int num_rows, const float* input,
const int input_stride, const float* params,
const int params_stride,
const float* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const float* self_repair_config, double count,
float* input_deriv,
const int input_deriv_stride,
float* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
float* self_repair_sum_out,
const int self_repair_sum_out_stride) {
hipLaunchKernelGGL(( _diff_lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0,
cell_dim, have_dropout_mask, num_rows, input,
input_stride, params, params_stride, output_deriv, output_deriv_stride,
deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv,
input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out,
value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride,
self_repair_sum_out, self_repair_sum_out_stride);
}
void cudaD_copy_cols_from_vec(dim3 Gr, dim3 Bl, double *mat_out,
MatrixDim d_out, const double *v_in) {
hipLaunchKernelGGL(( _copy_cols_from_vec), dim3(Gr), dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaF_copy_cols_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out,
const float *v_in) {
hipLaunchKernelGGL(( _copy_cols_from_vec), dim3(Gr), dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaF_diff_normalize_per_row(size_t Gr, size_t Bl, float *id,
int id_stride, const float *iv,
MatrixDim iv_dim, const float* od,
int od_stride, float target_rms,
bool add_log_stddev) {
hipLaunchKernelGGL(( _diff_normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, id, id_stride, iv, iv_dim, od, od_stride,
target_rms, add_log_stddev);
}
void cudaD_diff_normalize_per_row(size_t Gr, size_t Bl, double *id,
int id_stride, const double *iv,
MatrixDim iv_dim, const double* od,
int od_stride, double target_rms,
bool add_log_stddev) {
hipLaunchKernelGGL(( _diff_normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, id, id_stride, iv, iv_dim, od, od_stride,
target_rms, add_log_stddev);
}
void cudaD_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr,
int* out_col_idx, double* out_val,
const int* row_indexes, const int num_selected_rows,
const int* in_row_ptr, const int* in_col_idx,
const double* in_val) {
hipLaunchKernelGGL(( _select_rows), dim3(Gr), dim3(Bl), 0, 0, out_row_ptr, out_col_idx, out_val, row_indexes,
num_selected_rows, in_row_ptr, in_col_idx, in_val);
}
void cudaF_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr,
int* out_col_idx, float* out_val, const int* row_indexes,
const int num_selected_rows, const int* in_row_ptr,
const int* in_col_idx, const float* in_val) {
hipLaunchKernelGGL(( _select_rows), dim3(Gr), dim3(Bl), 0, 0, out_row_ptr, out_col_idx, out_val, row_indexes,
num_selected_rows, in_row_ptr, in_col_idx, in_val);
}
void cudaD_add_smat(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
double alpha, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val) {
hipLaunchKernelGGL(( _add_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_add_smat(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
float alpha, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val) {
hipLaunchKernelGGL(( _add_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaD_add_smat_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
double alpha, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val) {
hipLaunchKernelGGL(( _add_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_add_smat_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
float alpha, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val) {
hipLaunchKernelGGL(( _add_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaD_apply_exp_special(dim3 Gr, dim3 Bl, double* out, MatrixDim out_dim,
const double* in, int in_stride) {
hipLaunchKernelGGL(( _apply_exp_special), dim3(Gr), dim3(Bl), 0, 0, out, out_dim, in, in_stride);
}
void cudaF_apply_exp_special(dim3 Gr, dim3 Bl, float* out, MatrixDim out_dim,
const float* in, int in_stride) {
hipLaunchKernelGGL(( _apply_exp_special), dim3(Gr), dim3(Bl), 0, 0, out, out_dim, in, in_stride);
}
| f9a5bf3605d919ea68317c1a282e93d2847af099.cu | // cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// 2013 Ehsan Variani
// 2013 Johns Hopkins University (author: Daniel Povey)
// 2013 Hainan Xu
// 2013 Xiaohui Zhang
// 2013-2015 Guoguo Chen
// 2016-2017 Shiyin Kang
// 2017 Hossein Hadian
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// This file contains the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include <limits>
#include <math_constants.h>
#include "cudamatrix/cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while (nTotalThreads > 1) {
int32_cuda halfPoint = ((1 + nTotalThreads) >> 1); // divide by two
    // only the second half of the threads will be active; they add their
    // value into the first half's slots.
if (threadIdx.x >= halfPoint) { // was <
// Get the shared value stored by another thread
Real temp = 0.0;
if (threadIdx.x < nTotalThreads) { // was +halfPoint
temp = buffer[threadIdx.x]; // was +halfPoint
}
buffer[threadIdx.x - halfPoint] += temp;
}
__syncthreads();
nTotalThreads = ((1 + nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
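// The next two kernels symmetrize a square matrix in place: _copy_low_upp
// mirrors the strictly lower triangle into the upper triangle, and
// _copy_upp_low mirrors the strictly upper triangle into the lower triangle.
// One thread per (i, j) element; threads on or outside the relevant triangle
// return immediately.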
template<typename Real>
__global__
static void _copy_low_upp(Real* A, MatrixDim dimA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i <= j || i >= dimA.rows)
return;
int index_1 = i * dimA.stride + j;
int index_2 = j * dimA.stride + i;
A[index_2] = A[index_1];
}
template<typename Real>
__global__
static void _copy_upp_low(Real* A, MatrixDim dimA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j <= i || j >= dimA.rows)
return;
int index_1 = i * dimA.stride + j;
int index_2 = j * dimA.stride + i;
A[index_2] = A[index_1];
}
// mat += diag(vec) * mat2.
template<typename Real>
__global__
static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *vec, const Real *mat2,
int mat2_row_stride, int mat2_col_stride,
Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = j * mat_dim.stride + i, index2 = j * mat2_row_stride
+ i * mat2_col_stride;
if (i < mat_dim.cols && j < mat_dim.rows) {
mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index];
}
}
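// Copy a packed triangular source B (lower-triangular storage, where element
// (j, i) with i <= j lives at offset j*(j+1)/2 + i) into a full matrix A,
// zeroing everything above the diagonal. The _trans variant below writes the
// transpose instead.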
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dmat.cols && j < dmat.rows) {
int32_cuda index_B = (j * (j + 1) / 2) + i;
int32_cuda index_A = j * dmat.stride + i;
if (i <= j) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) {
// we interpret these indexes oppositely from normal, but it doesn't
// matter as it's invoked in a symmetric way.
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
// transpose the indices used to index the source TpMatrix.
if (i < dmat.rows && j < dmat.cols) {
int32_cuda index_B = (j * (j + 1) / 2) + i;
int32_cuda index_A = i * dmat.stride + j;
if (i <= j) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
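// Plain element-wise copy with optional precision conversion
// (float <-> double); source and destination may have different strides.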
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index.
int32_cuda index_out = i + j * d_out.stride;
int32_cuda index_in = i + j * d_in.stride;
if (i < d_out.cols && j < d_out.rows)
mat_out[index_out] = static_cast<Real>(mat_in[index_in]);
}
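// Transposed copy: each thread block stages a TileDim x TileDim tile of
// mat_in in shared memory (reads coalesced along source rows), then writes
// the tile back transposed (writes coalesced along destination rows).
// blockDim is assumed to be (TileDim, CU1DBLOCK / TileDim), so each loop
// iteration below covers CU1DBLOCK / TileDim rows of the tile.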
template<int TileDim, typename Real, typename OtherReal>
__global__
static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in,
MatrixDim d_out, MatrixDim d_in) {
  // Use shared mem to achieve both coalesced memory reading and writing
// '+1' to avoid bank conflict when reading sbuf
__shared__ Real sbuf[TileDim][TileDim + 1];
const int32_cuda i_in = blockIdx.y * TileDim + threadIdx.y; // row-index
const int32_cuda j_in = blockIdx.x * TileDim + threadIdx.x; // col-index
const int32_cuda tile_stride_in = CU1DBLOCK / TileDim * d_in.stride;
int32_cuda index_in = i_in * d_in.stride + j_in;
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_in + i < d_in.rows && j_in < d_in.cols) {
sbuf[threadIdx.y + i][threadIdx.x] = static_cast<Real>(mat_in[index_in]);
}
index_in += tile_stride_in;
}
__syncthreads();
// Grid is transposed, but block is not yet.
// Warp (blockDim.x) is always along the row-dim.
const int32_cuda i_out = blockIdx.x * TileDim + threadIdx.y;
const int32_cuda j_out = blockIdx.y * TileDim + threadIdx.x;
const int32_cuda tile_stride_out = CU1DBLOCK / TileDim * d_out.stride;
int32_cuda index_out = i_out * d_out.stride + j_out;
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_out + i < d_out.rows && j_out < d_out.cols) {
      // block is transposed when reading sbuf
mat_out[index_out] = sbuf[threadIdx.x][threadIdx.y + i];
}
index_out += tile_stride_out;
}
}
// Copy from CSR sparse matrix to dense matrix
//
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat(Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const OtherReal* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.rows) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx
mat[i * mat_dim.stride + j] = static_cast<Real>(smat_val[nz_id]);
}
}
}
/// Select a subset of the rows of a CSR SparseMatrix.
/// Sets 'out' to only the rows of 'in' that are listed
/// in 'row_indexes'. 'row_indexes' must be sorted and unique,
/// and satisfy 0 <= row_indexes[i] < in.size().
///
/// Note: 'out_row_ptr' is an input parameter that is calculated before
/// calling this kernel function
///
/// We use warpSize threads per row to access only the nnz elements.
/// Every CU1DBLOCK/warpSize rows share one thread block.
/// 1D grid to cover all selected rows.
template<typename Real>
__global__
static void _select_rows(const int* out_row_ptr, int* out_col_idx,
Real* out_val, const int* row_indexes,
const int num_selected_rows, const int* in_row_ptr,
const int* in_col_idx, const Real* in_val) {
const int out_i = blockIdx.x * blockDim.y + threadIdx.y; // out row idx
if (out_i < num_selected_rows) {
const int in_i = row_indexes[out_i];
const int in_row_start = in_row_ptr[in_i];
const int out_row_start = out_row_ptr[out_i];
const int row_length = in_row_ptr[in_i + 1] - in_row_start;
for (int k = threadIdx.x; k < row_length; k += warpSize) {
const int in_n = in_row_start + k;
const int out_n = out_row_start + k;
out_col_idx[out_n] = in_col_idx[in_n];
out_val[out_n] = in_val[in_n];
}
}
}
// mat += alpha * smat
//
// We use warpSize threads per row to access only the nonzero elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _add_smat(Real* mat, MatrixDim mat_dim, Real alpha,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.rows) {
const int row_start = smat_row_ptr[i];
const int row_end = smat_row_ptr[i + 1];
for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) {
const int j = smat_col_idx[n]; // col idx of smat
mat[i * mat_dim.stride + j] += alpha * smat_val[n];
}
}
}
// mat += alpha * smat^T
//
// We use warpSize threads per row to access only the nonzero elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _add_smat_trans(Real* mat, MatrixDim mat_dim, Real alpha,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.cols) {
const int row_start = smat_row_ptr[i];
const int row_end = smat_row_ptr[i + 1];
for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) {
const int j = smat_col_idx[n]; // col idx of smat
mat[j * mat_dim.stride + i] += alpha * smat_val[n];
}
}
}
/// For each element x of the matrix, set it to
/// (x < 0 ? exp(x) : x + 1).
/// Use block/grid sizes for simple matrix ops
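/// Note: the two branches agree in both value (1) and slope (1) at x == 0, so
/// the map is continuously differentiable; presumably the point is to grow
/// only linearly, rather than exponentially, for large positive inputs.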
template<typename T>
__global__
static void _apply_exp_special(T* out, MatrixDim out_dim, const T* in,
int in_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < out_dim.rows && j < out_dim.cols) {
T x = in[i * in_stride + j];
if (x < T(0)) {
out[i * out_dim.stride + j] = exp(x);
} else {
out[i * out_dim.stride + j] = x + T(1);
}
}
}
/// Fill the array 'data' with the sequence [base ... base + length)
/// Use 1D block and 1D grid
template<typename T>
__global__
static void _sequence(T* data, int length, T base) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < length) {
data[i] = base + T(i);
}
}
// Copy from CSR sparse matrix to transposed dense matrix
//
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat_trans(Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr,
const int* smat_col_idx,
const OtherReal* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.cols) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
mat[j * mat_dim.stride + i] = static_cast<Real>(smat_val[nz_id]);
}
}
}
// First stage of trace(mat * smat^T)
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _trace_mat_smat_trans(const Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr,
const int* smat_col_idx, const Real* smat_val,
Real* trace_vec) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.rows) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
trace_vec[nz_id] = mat[i * mat_dim.stride + j] * smat_val[nz_id];
}
}
}
// First stage of trace(mat * smat)
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _trace_mat_smat(const Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val, Real* trace_vec) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.cols) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
trace_vec[nz_id] = mat[j * mat_dim.stride + i] * smat_val[nz_id];
}
}
}
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
mat[index] = exp(mat[index]);
}
}
template<typename Real>
__global__
static void _scale_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = value * mat[index];
}
}
template<typename Real>
__global__
static void _set_diag(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = i + i * d.stride;
if (i < d.rows && i < d.cols) {
mat[index] = value;
}
}
template<typename Real>
__global__
static void _set_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = value;
}
}
template<typename Real>
__global__
static void _add_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = mat[index] + value;
}
}
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = value;
}
template<typename Real>
__global__
static void _set_zero_above_diag(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < i)
mat[index] = 0.0;
}
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = mat[index] + value;
}
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = mat[index] * value;
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows)
mat[dst_index] = mat[dst_index] * A[src_index];
}
template<typename Real>
__global__
static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows)
mat[dst_index] = mat[dst_index] / A[src_index];
}
template<typename Real>
__global__
static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows) {
Real a = mat[dst_index], b = A[src_index];
mat[dst_index] = fmax(a, b);
}
}
template<typename Real>
__global__
static void _min(Real* mat, const Real* other, MatrixDim mat_d,
int other_stride) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda mat_index = i * mat_d.stride + j;
int32_cuda other_index = i * other_stride + j;
if (j < mat_d.cols && i < mat_d.rows) {
Real a = mat[mat_index], b = other[other_index];
mat[mat_index] = fmin(a, b);
}
}
template<typename Real>
__global__
static void _vec_mul_elements(Real* v, const Real* a, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim)
v[i] = v[i] * a[i];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[j];
}
template<typename Real>
__global__
static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d,
int src_stride, int group_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols) {
int dst_index = i + j * d.stride;
int src_index = i / group_size + j * src_stride;
y[dst_index] *= x[src_index];
}
}
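// Backprop through the group p-norm. With y = (sum_k |x_k|^p)^(1/p) computed
// over each group, the general-case derivative used below is
//   dy/dx_i = sign(x_i) * |x_i|^(p-1) * y^(1-p),
// with special cases for p = 1 (sign(x_i)), p = 2 (x_i / y) and p = inf
// (sign(x_i) where |x_i| attains the max, else 0); the result is then
// multiplied by the output derivative 'od'.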
template<typename Real>
__global__
void _diff_group_pnorm(Real *id, const Real *iv, const Real *ov, const Real* od,
MatrixDim id_dim, int iv_stride, int ov_stride,
int od_stride, int group_size, Real power) {
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < id_dim.cols) {
const int grid_stride = gridDim.y * blockDim.y;
const int src_j = j / group_size;
int i = blockIdx.y * blockDim.y + threadIdx.y;
for (; i < id_dim.rows; i += grid_stride) {
const int iv_index = j + i * iv_stride;
Real iv_ij = iv[iv_index];
Real ans;
if (power == Real(2)) {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
ans = ov_ij <= 0.0 ? 0.0 : iv_ij / ov_ij;
} else if (power == Real(1)) {
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
ans = (iv_ij == Real(0) ? 0.0 : iv_ij_sign);
} else if (power
== (sizeof(Real) == sizeof(float) ? CUDART_INF_F : CUDART_INF)) {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
ans =
ov_ij <= 0.0 ?
0.0 : (iv_ij_sign * (abs(iv_ij) == ov_ij ? 1.0 : 0.0));
} else {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
if (ov_ij <= 0.0) {
ans = 0.0; // The derivative is either 0 or undefined at the origin.
} else {
ans = iv_ij_sign * pow(std::abs(iv_ij), power - 1)
* pow(ov_ij, 1 - power);
}
}
const int od_index = src_j + i * od_stride;
const int id_index = j + i * id_dim.stride;
id[id_index] = ans * od[od_index];
}
}
}
/// deriv is the derivative we will output; vec is the input we're computing
/// the group max on, "maxv" is the previously computed group max.
template<typename Real>
__global__
static void _calc_group_max_deriv(Real *deriv, const Real *vec,
const Real *maxv, MatrixDim deriv_dim,
int vec_stride, int maxv_stride,
int group_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < deriv_dim.rows && i < deriv_dim.cols) {
int deriv_index = i + j * deriv_dim.stride;
int vec_index = i + j * vec_stride;
int maxv_index = i / group_size + j * maxv_stride;
Real vec_element = vec[vec_index], // The element of the original vector.
max_element = maxv[maxv_index]; // this is the max value
Real ans = (max_element == vec_element ? 1.0 : 0.0);
deriv[deriv_index] = ans;
}
}
/// Set each element to y = (x == orig ? changed : x).
template<typename Real>
__global__
static void _replace_value(Real *vec, int dim, Real orig, Real changed) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim)
if (vec[i] == orig)
vec[i] = changed;
}
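// Divide every element of row i of 'mat' by vec_div[i]. The reciprocal is
// computed once per row, and each thread then strides across the columns.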
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
const int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
if (i < d.rows) {
const int32_cuda start = i * d.stride;
const Real scale = Real(1) / vec_div[i];
const int32_cuda grid_stride = blockDim.x * gridDim.x;
for (int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; j < d.cols; j +=
grid_stride) {
mat[start + j] *= scale;
}
}
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int32_cuda index = i + j * d.stride;
int32_cuda index_src = i + j * src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * src[index_src] + dst[index];
}
template<typename Real>
__global__
static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = j + i * src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * src[index_src] + dst[index];
}
template<typename Real>
__global__
static void _add_mat_blocks(Real alpha, const Real* src,
int32_cuda num_row_blocks,
int32_cuda num_col_blocks, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = i + j * src_stride;
if (i < d.cols && j < d.rows)
for (int32_cuda p = 0; p < num_row_blocks; p++) {
for (int32_cuda q = 0; q < num_col_blocks; q++) {
dst[index] = alpha
* src[index_src + p * src_stride * d.rows + q * d.cols]
+ dst[index];
}
}
}
template<typename Real>
__global__
static void _add_mat_repeated(Real alpha, const Real* src,
MatrixDim src_dim, Real* dst,
MatrixDim dst_dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda src_i = i % src_dim.cols,
src_j = j % src_dim.rows,
dst_index = i + j * dst_dim.stride,
src_index = src_i + src_j * src_dim.stride;
if (i < dst_dim.cols && j < dst_dim.rows)
dst[dst_index] += alpha * src[src_index];
}
template<typename Real>
__global__
static void _add_mat_blocks_trans(Real alpha, const Real* src,
int32_cuda num_row_blocks,
int32_cuda num_col_blocks, Real* dst,
MatrixDim d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = j + i * src_stride;
if (i < d.cols && j < d.rows)
for (int32_cuda p = 0; p < num_row_blocks; p++) {
for (int32_cuda q = 0; q < num_col_blocks; q++) {
dst[index] = alpha
* src[index_src + p * src_stride * d.cols + q * d.rows]
+ dst[index];
}
}
}
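// Element-wise dst = A * B / C, with the convention that dst = A wherever
// C == 0 (avoiding a division by zero).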
template<typename Real>
__global__
static void _set_mat_mat_div_mat(const Real* A, const Real* B, const Real* C,
Real* dst, MatrixDim d, int stride_a,
int stride_b, int stride_c) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride, a_index = i + j * stride_a, b_index = i
+ j * stride_b, c_index = i + j * stride_c;
if (i < d.cols && j < d.rows)
if (C[c_index] == 0)
dst[index] = A[a_index];
else
dst[index] = A[a_index] * B[b_index] / C[c_index];
}
// Given a matrix input S (not packed!) and a lower-triangular matrix L, this
// function does S = beta S + alpha * L^T L. This is used in PSD matrix
// inversion. The i index is the row of the destination S and the j the column
// (although of course the output is symmetric so it doesn't matter in a sense).
// The main point of this is to make use of various symmetries and zero-ness.
template<typename Real>
__global__
static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim,
Real *S, MatrixDim sdim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= sdim.rows || j > i)
return;
// this thread computes the dot-product of the i'th column of
// L with the j'th column of L. The values we're multiplying
// are only nonzero for row-index k greater or equal to
// max(i, j), which equals i.
Real sum = 0.0;
for (int k = i; k < sdim.rows; k++) {
int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k;
sum += T[i_index] * T[j_index];
}
int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i;
S[output_index1] = alpha * sum + beta * S[output_index1];
S[output_index2] = alpha * sum + beta * S[output_index2];
}
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst,
MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * col[j] + beta * dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst,
MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * row[i] + beta * dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat,
MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * dmat.stride;
int32_cuda index2 = i + j * dmask.stride;
if (i < dmat.cols && j < dmat.rows)
if (mask[index2] == 0)
mat[index] = 0;
}
template<typename Real>
__global__
static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *mat2, int mat2_row_stride,
int mat2_col_stride, const Real *vec, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * mat_dim.stride, index2 = i * mat2_col_stride
+ j * mat2_row_stride;
if (j < mat_dim.rows && i < mat_dim.cols)
mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index];
}
template<typename Real>
__global__
static void _add_mat_mat_elements(Real *data, const Real *srcA_data,
const Real *srcB_data, MatrixDim dim,
int srcA_stride, int srcB_stride, Real alpha,
Real beta) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda tgt_index = i + j * dim.stride;
int32_cuda srcA_index = i + j * srcA_stride;
int32_cuda srcB_index = i + j * srcB_stride;
if (i < dim.cols && j < dim.rows) {
data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index]
+ beta * data[tgt_index];
}
}
/*
* CuVector
*/
// very limited application!
template<typename Real>
__global__
static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2,
Real param_3, int* flag, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
Real ratio = a[i] / param_3;
if ((ratio < 0.0) || (ratio >= 1.01)) {
*flag = 1;
return;
}
if (ratio < param_1) {
Real factor = ((param_1 / ratio) > param_2) ? param_2 : (param_1 / ratio);
v[i] = v[i] / factor;
} else if (ratio > param_1) {
Real factor = ((ratio / param_1) > param_2) ? param_2 : (ratio / param_1);
v[i] = v[i] * factor;
}
}
}
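// Strided copy with precision conversion: y[i*incy] = (OtherReal)x[i*incx].
// Presumably named after the cuBLAS copy routine whose argument convention
// (n, x, incx, y, incy) it mirrors, but handling mixed float/double.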
template<typename Real, typename OtherReal>
__global__
static void _cublas_copy_kaldi(int n, const Real* x, int incx, OtherReal* y,
int incy) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
y[i * incy] = static_cast<OtherReal>(x[i * incx]);
}
}
// This kernel writes a copy of the vector "v_in" to each row of the matrix
// "m_out". the dimension of v_in should be equal to the #columns of m_out.
template<typename Real>
__global__
static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index.
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index.
if (i < d.cols && j < d.rows) {
int index = i + j * d.stride;
m_out[index] = v_in[i];
}
}
// This kernel writes a copy of the vector "v_in" to each column of the matrix
// "m_out". The dimension of v_in should equal the number of rows of m_out.
template<typename Real>
__global__
static void _copy_cols_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
int i = blockIdx.y * blockDim.y + threadIdx.y; // row id
int j = blockIdx.x * blockDim.x + threadIdx.x; // col id
if (i < d.rows && j < d.cols) {
m_out[i * d.stride + j] = v_in[i];
}
}
// _trace_mat_mat reduces the partial sums to
// value[blockIdx.y * gridDim.x + blockIdx.x].
// It uses shared memory to transpose matrix B so that the memory accesses
// are coalesced.
template<int TileDim, typename Real>
__global__
static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA,
int B_stride, Real* value) {
// Reuse shared mem and make indexing easier. "+1" to avoid bank conflict
__shared__ union {
Real trans[TileDim][TileDim + 1];
Real sum[CU1DBLOCK];
} smem;
// linear thread id;
const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x;
const int32_cuda grid_height = gridDim.y * TileDim;
const int32_cuda ja = blockIdx.x * TileDim + threadIdx.x;
const int32_cuda ib = blockIdx.x * TileDim + threadIdx.y;
int32_cuda ia = blockIdx.y * TileDim + threadIdx.y;
int32_cuda jb = blockIdx.y * TileDim + threadIdx.x;
// Grid reduce
Real tsum = Real(0);
for (int32_cuda i0 = 0; i0 < dA.rows; i0 += grid_height) {
// Load from B, transpose the block and store in shared mem
if (jb < dA.rows) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (ib + i < dA.cols) {
smem.trans[threadIdx.x][threadIdx.y + i] =
B[(ib + i) * B_stride + jb];
}
}
}
__syncthreads();
// Load from A, sum up the product.
if (ja < dA.cols) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (ia + i < dA.rows) {
tsum += A[(ia + i) * dA.stride + ja]
* smem.trans[threadIdx.y + i][threadIdx.x];
}
}
}
__syncthreads();
ia += grid_height;
jb += grid_height;
}
smem.sum[tid] = tsum;
__syncthreads();
// Block reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
smem.sum[tid] += smem.sum[tid + shift];
__syncthreads();
}
// Warp reduce. Implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem.sum[tid] += smem.sum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
value[blockIdx.y * gridDim.x + blockIdx.x] = smem.sum[0];
}
}
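// Hedged usage sketch (an illustration, not the library's actual calling
// code): the kernel above leaves one partial sum per thread block in 'value',
// so the caller must finish the reduction itself. Here B is assumed to have
// dA.cols rows and dA.rows columns (row stride B_stride), the tile size of 32
// is an assumption (it must divide CU1DBLOCK), and 'example_trace_mat_mat'
// and 'd_partial' are made-up names for this sketch only.
static float example_trace_mat_mat(const float* d_A, const float* d_B,
                                   MatrixDim dA, int B_stride,
                                   float* d_partial /* >= grid size elems */) {
  const int kTileDim = 32;  // assumed to divide CU1DBLOCK
  dim3 dimBlock(kTileDim, CU1DBLOCK / kTileDim);
  dim3 dimGrid((dA.cols + kTileDim - 1) / kTileDim,
               (dA.rows + kTileDim - 1) / kTileDim);
  _trace_mat_mat<kTileDim><<<dimGrid, dimBlock>>>(d_A, d_B, dA, B_stride,
                                                  d_partial);
  const int num_partials = dimGrid.x * dimGrid.y;
  float* partials = new float[num_partials];
  cudaMemcpy(partials, d_partial, num_partials * sizeof(float),
             cudaMemcpyDeviceToHost);
  float trace = 0.0f;  // tr(A * B) is the sum of the per-block partial sums
  for (int i = 0; i < num_partials; i++)
    trace += partials[i];
  delete[] partials;
  return trace;
}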
// _trace_mat_mat_trans reduces the partial sums to
// value[blockIdx.y * gridDim.x + blockIdx.x].
template<typename Real>
__global__
static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA,
int B_stride, Real* value) {
__shared__ Real ssum[CU1DBLOCK];
// linear thread id;
const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x;
const int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
const int32_cuda grid_height = gridDim.y * blockDim.y;
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
// Grid reduce
Real tsum = Real(0);
if (j < dA.cols) {
while (i < dA.rows) {
tsum += A[i * dA.stride + j] * B[i * B_stride + j];
i += grid_height;
}
}
ssum[tid] = tsum;
__syncthreads();
// Block reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Warp reduce. Implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
value[blockIdx.y * gridDim.x + blockIdx.x] = ssum[0];
}
}
// v = alpha * diag(M * N^T) + beta * v
template<typename Real>
__global__
static void _add_diag_mat_mat_MNT(const Real alpha, const Real* M,
const MatrixDim dim_M, const Real* N,
const int stride_N, const Real beta,
Real* v) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int m_start = i * dim_M.stride;
const int n_start = i * stride_N;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < dim_M.cols; j += CU1DBLOCK) {
tsum += M[m_start + j] * N[n_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
v[i] = alpha * ssum[0] + beta * v[i];
}
}
// v = alpha * diag(M^T * N) + beta * v
template<int TileDim, typename Real>
__global__
static void _add_diag_mat_mat_MTN(const Real alpha, const Real* M,
const int stride_M, const Real* N,
const MatrixDim dim_N, const Real beta,
Real* v) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= dim_N.cols)
return;
// Loop along the matrix column.
// Reduce to CU1DBLOCK / TileDim elements per column.
Real tsum = Real(0);
for (int i = threadIdx.y; i < dim_N.rows; i += blockDim.y) {
tsum += M[i * stride_M + j] * N[i * dim_N.stride + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize / TileDim elements per column.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim;
shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element per column.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift >= TileDim; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output TileDim sums per thread block
if (tid < TileDim) {
v[j] = alpha * ssum[tid] + beta * v[j];
}
}
// v = alpha * diag(M * N) + beta * v
template<int TileDim, typename Real>
__global__
static void _add_diag_mat_mat_MN(const Real alpha, const Real* M,
const int stride_M, const Real* N,
const MatrixDim dim_N, const Real beta,
Real* v) {
// Reuse shared mem and make indexing easier. "+1" to avoid bank conflict
__shared__ union {
Real trans[TileDim][TileDim + 1];
Real sum[CU1DBLOCK];
} smem;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int i_m = blockIdx.x * TileDim + threadIdx.y;
const int j_n = blockIdx.x * TileDim + threadIdx.x;
int i_n = threadIdx.y;
int j_m = threadIdx.x;
// Loop along the matrix column.
// Reduce to CU1DBLOCK / TileDim elements per column.
Real tsum = Real(0);
for (int block_i_n = 0; block_i_n < dim_N.rows; block_i_n += TileDim) {
// Load, transpose and store M to shared mem.
if (j_m < dim_N.rows) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_m + i < dim_N.cols) {
smem.trans[threadIdx.x][threadIdx.y + i] = M[(i_m + i) * stride_M
+ j_m];
}
}
}
__syncthreads();
// Load N, sum up the product.
if (j_n < dim_N.cols) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_n + i < dim_N.rows) {
tsum += N[(i_n + i) * dim_N.stride + j_n]
* smem.trans[threadIdx.y + i][threadIdx.x];
}
}
}
__syncthreads();
i_n += TileDim;
j_m += TileDim;
}
smem.sum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize / TileDim elements per column.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim;
shift >>= 1) {
if (tid < shift) {
smem.sum[tid] += smem.sum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element per column.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift >= TileDim; shift >>= 1) {
smem.sum[tid] += smem.sum[tid + shift];
}
}
// output TileDim sums per thread block
if (tid < TileDim && j_n < dim_N.cols) {
v[j_n] = alpha * smem.sum[tid] + beta * v[j_n];
}
}
template<typename Real>
__global__
static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y,
Real beta, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = alpha * x[i] * y[i] + beta * v[i];
}
template<typename Real>
__global__
static void _copy_col_from_mat_df(double* v, int col, const Real* mat,
MatrixDim dmat, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = col + i * dmat.stride;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = (double) mat[index];
}
template<typename Real>
__global__
static void _copy_col_from_mat_fd(float* v, int col, const Real* mat,
MatrixDim dmat, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = col + i * dmat.stride;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = (float) mat[index];
}
template<typename Real>
__global__
static void _vec_apply_exp(Real* v, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim) {
v[i] = exp(v[i]);
}
}
template<typename Real>
__global__
static void _vec_apply_log(Real* v, Real* flag, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim) {
if (v[i] < 0) {
*flag = 1;
return;
}
v[i] = log(v[i]);
}
}
template<typename Real>
__global__
static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z,
MatrixDim d, Real* z2, MatrixDim d2, Real* t) {
int i = threadIdx.x;
__shared__ Real tot_objf[CU1DBLOCK];
__shared__ Real tot_weight[CU1DBLOCK];
Real tmp_weight_sum = 0;
Real tmp_tot_objf = 0;
int size = s / CU1DBLOCK; // minimum number of elements handled by one thread
int threshold = s - size * CU1DBLOCK; // threads below this index handle one extra element
int loop_start;
int loop_end;
if (i < threshold) {
loop_start = i * (size + 1);
loop_end = (i + 1) * (size + 1);
} else {
loop_start = threshold + i * size;
loop_end = threshold + (i + 1) * size;
}
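  // Partitioning example: with s = 1000 and CU1DBLOCK = 256, size = 3 and
  // threshold = 1000 - 3 * 256 = 232, so threads 0..231 each process 4
  // elements and threads 232..255 each process 3 (232*4 + 24*3 = 1000).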
for (int j = loop_start; j < loop_end; j++) {
//* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) );
int m = (x + j)->row;
//*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int));
int label = (x + j)->column;
// *(Real*) ((size_t)x + j*(2*sizeof(int) + sizeof(Real)) + 2*sizeof(int));
Real weight = (x + j)->weight;
tmp_weight_sum += weight;
Real this_prob = *(z + m * d.stride + label);
tmp_tot_objf += weight * log(this_prob);
// Note: this '+=' is not atomic, so there is a potential race here if two
// elements of 'x' share the same (row, column).
*(z2 + m * d2.stride + label) += weight / this_prob;
}
tot_objf[i] = tmp_tot_objf;
tot_weight[i] = tmp_weight_sum;
__syncthreads();
*t = _sum_reduce(tot_objf);
__syncthreads();
*(t + 1) = _sum_reduce(tot_weight);
return;
}
template<typename Real>
__global__
static void _cuda_vector_copy_elements(Real *data, int dim,
const Real *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= dim)
return;
int j = elements[i];
int mat_index;
if (transpose)
mat_index = i + j * mat_stride;
else
mat_index = j + i * mat_stride;
data[i] = src_mat[mat_index];
}
template<typename Real>
__global__
static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha,
MatrixElement<Real>* x,
int num_elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= num_elements)
return;
data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight;
}
template<typename Real>
__global__
static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha,
const Int32Pair* indices,
const Real* x, int s, Real* data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= s)
return;
int data_i = indices[i].first * dim.stride + indices[i].second;
data[data_i] += alpha * x[i];
}
template<typename Real>
__global__
static void _cuda_matrix_add_to_elements(Real alpha,
Real* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < dim.rows) {
int col = elements[row];
if (col >= 0) {
int index = col + row * dim.stride;
mat[index] += alpha;
}
}
}
template<typename Real>
__global__
static void _matrix_lookup(const Real *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
Real *output) {
int ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= indices_size)
return;
int data_ind = indices[ind].first * dim.stride + indices[ind].second;
output[ind] = data[data_ind];
}
template<typename Real>
__global__
static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
int32_cuda index_mat1 = i + j * mat1_dim.stride;
int32_cuda index_mat2 = i + j * mat2_stride;
int32_cuda index_mask = i + j * mask_stride;
if (i < mat1_dim.cols && j < mat1_dim.rows)
mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0);
}
enum EnumTransformReduce {
SUMAB, SUM, MAX, MIN, LINFNORM, L2NORM, L1NORM, L0NORM, LPNORM
};
template<EnumTransformReduce TransReduceType, typename Real>
struct TransReduceOp {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return Real(0);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return Real(0);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return Real(0);
}
};
template<typename Real>
struct TransReduceOp<SUMAB, Real> {
const Real alpha_;
const Real beta_;
TransReduceOp(const Real& a, const Real& b) :
alpha_(a), beta_(b) {
}
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
if (beta_ == Real(0)) {
return alpha_ * x;
} else {
return alpha_ * x + beta_ * output;
}
}
};
template<typename Real>
struct TransReduceOp<SUM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<MAX, Real> {
__forceinline__
__device__ Real InitValue() const {
return sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<MIN, Real> {
__forceinline__
__device__ Real InitValue() const {
return sizeof(Real) == sizeof(float) ? CUDART_INF_F : CUDART_INF;
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return min(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<LINFNORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return abs(x);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<L2NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x * x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return sqrt(x);
}
};
template<typename Real>
struct TransReduceOp<L1NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return abs(x);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<L0NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return Real(x == Real(0) ? 0 : 1);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<LPNORM, Real> {
const Real power_;
TransReduceOp(const Real& p) :
power_(p) {
}
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return pow(abs(x), power_);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return pow(x, Real(1) / power_);
}
};
// Vector reduce.
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _vec_transform_reduce(
const Real* v, Real* result, const int dim, const int inc,
const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sdata[CU1DBLOCK];
Real tdata = op.InitValue();
const int tid = threadIdx.x;
const int vec_len = dim * inc;
const int grid_stride = gridDim.x * blockDim.x * inc;
int i = (blockIdx.x * blockDim.x + tid) * inc;
// Grid reduce. Loop over the whole vector v.
for (; i < vec_len; i += grid_stride) {
tdata = op.Reduce(tdata, op.Transform(v[i]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
__syncthreads();
}
// Reduce last warp. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
}
// Output to vector result.
if (tid == 0)
result[blockIdx.x] = op.PostReduce(sdata[0], result[blockIdx.x]);
}
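// Hedged usage sketch (illustrative only; the real host wrappers live
// elsewhere in the library): the TransReduceOp specializations above plug
// into _vec_transform_reduce to build different reductions. For L2NORM the
// pieces compose as sqrt(sum(x*x)). With a single CU1DBLOCK-thread block the
// whole vector is strided over by that one block, so result[0] already holds
// the final value. 'example_vec_l2norm' and 'd_result' (a device buffer of
// at least one float) are made-up names for this sketch.
static float example_vec_l2norm(const float* d_v, int dim, float* d_result) {
  TransReduceOp<L2NORM, float> op;
  _vec_transform_reduce<<<1, CU1DBLOCK>>>(d_v, d_result, dim, /*inc=*/1, op);
  float norm = 0.0f;
  cudaMemcpy(&norm, d_result, sizeof(float), cudaMemcpyDeviceToHost);
  return norm;
}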
// Reduce a matrix 'mat' to a column vector 'result'
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _transform_reduce_mat_cols(
Real *result, const Real *mat, const MatrixDim d,
const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sdata[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int row_start = i * d.stride;
Real tdata = op.InitValue();
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tdata = op.Reduce(tdata, op.Transform(mat[row_start + j]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
__syncthreads();
}
// Reduce last warp. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1)
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
// Output to vector result.
if (tid == 0) {
result[i] = op.PostReduce(sdata[0], result[i]);
}
}
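// Hedged usage sketch (illustrative only): per-row maxima of a matrix with
// the kernel above -- one CU1DBLOCK-thread block per row, writing one value
// per row. 'example_row_max' and 'd_row_max' (a device buffer of at least
// d.rows floats) are made-up names for this sketch.
static void example_row_max(const float* d_mat, MatrixDim d, float* d_row_max) {
  TransReduceOp<MAX, float> op;
  _transform_reduce_mat_cols<<<d.rows, CU1DBLOCK>>>(d_row_max, d_mat, d, op);
}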
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _group_transform_reduce(
Real *y, const Real *x, const MatrixDim d, const int src_stride,
const int group_size, const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sreduction[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * src_stride;
const int y_start = i * d.stride;
const int threads_per_group = blockDim.x;
// Reduce n groups per thread block
const int n = blockDim.y;
const int len = group_size * n;
// linear thread id
const int tid = threadIdx.y * threads_per_group + threadIdx.x;
int j = threadIdx.y * group_size + threadIdx.x; // col-id of *x
int group_id = threadIdx.y; // col-id of *y
int group_end = x_start + (group_id + 1) * group_size;
while (group_id < d.cols) {
// reduce to threads_per_group elements per group
int x_idx = x_start + j;
Real treduction = op.Transform(x[x_idx]);
x_idx += threads_per_group;
while (x_idx < group_end) {
treduction = op.Reduce(treduction, op.Transform(x[x_idx]));
x_idx += threads_per_group;
}
sreduction[tid] = treduction;
if (threads_per_group > warpSize) {
__syncthreads();
}
// tree-reduce to 2x warpSize elements per group
# pragma unroll
for (int shift = threads_per_group / 2; shift > warpSize; shift >>= 1) {
if (threadIdx.x < shift) {
sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]);
}
__syncthreads();
}
// Warp-reduce to 1 element per group.
// Threads implicitly synchronized within the warp.
const int warp_reduce_size =
threads_per_group / 2 < warpSize ? threads_per_group / 2 : warpSize;
if (threadIdx.x < warp_reduce_size) {
# pragma unroll
for (int shift = warp_reduce_size; shift > 0; shift >>= 1) {
sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]);
}
}
// Store the result.
if (threadIdx.x == 0) {
y[y_start + group_id] = op.PostReduce(sreduction[tid],
y[y_start + group_id]);
}
j += len;
group_end += len;
group_id += n;
}
}
template<typename Real>
__global__
static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
if (v[i] < floor_val) {
v[i] = floor_val;
count[i] = 1;
} else {
count[i] = 0;
}
}
}
template<typename Real>
__global__
static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count,
int dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
if (v[i] > ceiling_val) {
v[i] = ceiling_val;
count[i] = 1;
} else {
count[i] = 0;
}
}
}
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (power == 1.0)
return;
if (power == 2.0) {
mat[index] = mat[index] * mat[index];
} else if (power == 0.5) {
if (!(mat[index] >= 0.0))
return;
mat[index] = sqrt(mat[index]);
} else {
mat[index] = pow(mat[index], power);
}
}
}
template<typename Real>
__global__
static void _apply_pow_abs(Real* mat, Real power, bool include_sign,
MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (include_sign == true && mat[index] < 0) {
if (power == 1.0) {
  mat[index] = -std::abs(mat[index]);
} else if (power == 2.0) {
mat[index] = -mat[index] * mat[index];
} else if (power == 0.5) {
mat[index] = -sqrt(std::abs(mat[index]));
} else {
mat[index] = -pow(std::abs(mat[index]), power);
}
} else {
if (power == 1.0) {
  mat[index] = std::abs(mat[index]);
} else if (power == 2.0) {
mat[index] = mat[index] * mat[index];
} else if (power == 0.5) {
mat[index] = sqrt(std::abs(mat[index]));
} else if (power < 0.0 && mat[index] == 0.0) {
mat[index] = 0.0;
} else {
mat[index] = pow(std::abs(mat[index]), power);
}
}
}
}
template<typename Real>
__global__
static void _apply_heaviside(Real* mat, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0);
}
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (mat[index] < floor_val)
mat[index] = floor_val;
}
}
template<typename Real>
__global__
static void _copy_cols(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[i], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = j * src_stride + index;  // 'index' is reorder[i], loaded above
Real val = src[src_index];
dst[dst_index] = val;
} else {
dst[dst_index] = 0.0;
}
}
}
template<typename Real>
__global__
static void _add_cols(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[i], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = j * src_stride + index;
Real val = src[src_index];
dst[dst_index] += val;
}
}
}
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[j], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = reorder[j] * src_stride + i;
Real val = src[src_index];
dst[dst_index] = val;
} else {
dst[dst_index] = 0;
}
}
}
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real * const *src, MatrixDim dst_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
const Real *pointer = src[j];
if (pointer != NULL) {
dst[dst_index] = pointer[i];
} else {
dst[dst_index] = 0;
}
}
}
template<typename Real>
__global__
static void _copy_to_rows(Real* const * dst, const Real *src,
MatrixDim src_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < src_dim.cols && j < src_dim.rows) {
Real *pointer = dst[j];
if (pointer != NULL) {
pointer[i] = src[j * src_dim.stride + i];
}
}
}
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (reorder[j] >= 0) {
int src_index = reorder[j] * src_stride + i;
dst[dst_index] += alpha * src[src_index];
}
}
}
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real * const *src,
MatrixDim dst_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (src[j] != NULL) {
dst[dst_index] += alpha * src[j][i];
}
}
}
template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim src_dim,
int dst_stride) {
int c = blockIdx.x * blockDim.x + threadIdx.x; // col index
int r = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (c < src_dim.cols && r < src_dim.rows) {
int src_index = r * src_dim.stride + c;
if (reorder[r] >= 0) {
int dst_index = reorder[r] * dst_stride + c;
dst[dst_index] += alpha * src[src_index];
}
}
}
template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* const * dst, const Real *src,
MatrixDim src_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < src_dim.cols && j < src_dim.rows) {
if (dst[j] != NULL) {
dst[j][i] += alpha * src[j * src_dim.stride + i];
}
}
}
template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (mat[index] > ceiling_val)
mat[index] = ceiling_val;
}
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j * d.stride;
if (i < d.cols && j < d.rows)
data[index] = 1.0 / data[index];
}
// matrix-wise, does data = alpha * A * B^T + beta * data,
// where B is a block matrix.
template<typename Real>
__global__
static void _add_mat_blockmat_trans(Real *data, MatrixDim dim,
const Real *A_data, int A_num_rows,
int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &cu_data = B_cu_data[j];
// BT means B transposed.
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset,
BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols =
cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride;
// Cast from void*: the data pointer is stored untyped so that
// CuBlockMatrixData can remain a plain C struct.
const Real *B_data = static_cast<Real*>(cu_data.matrix_data);
for (int k = 0; k < BT_num_cols; k++) {
const Real *this_BT_col = B_data + k * BT_col_stride;
const Real *this_A_row = A_data + i * A_row_stride
+ BT_row_start * A_col_stride;
// this_A_row points to the element A[i][BT_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
sum += this_BT_col[l] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + BT_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
template<typename Real>
__global__
static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data,
int A_num_rows, int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &block_data = B_cu_data[j];
int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset,
B_num_rows = block_data.matrix_dim.rows, B_num_cols =
block_data.matrix_dim.cols, B_row_stride =
block_data.matrix_dim.stride;
// Cast from void*: the data pointer is stored untyped so that
// CuBlockMatrixData can remain a plain C struct.
const Real *B_data = static_cast<Real*>(block_data.matrix_data);
for (int k = 0; k < B_num_cols; k++) {
const Real *this_B_col = B_data + k;
const Real *this_A_row = A_data + i * A_row_stride
+ B_row_start * A_col_stride;
// this_A_row points to the element A[i][B_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < B_num_rows; l++) // l indexes rows of B.
sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + B_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
// For a block matrix B, does B = alpha * C * D + beta * B.
// the (x,y,z) indices are the block index, then the row
// and column indices within the block. Note: transposition of C and D
// is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride),
// so it's invisible to this code. The num-cols and num-rows of C and D
// are only provided to the extent that they are not already determined
// by other quantities.
template<typename Real>
__global__
static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks,
const Real *C_data, int C_num_cols,
int C_row_stride, int C_col_stride,
const Real *D_data, int D_row_stride,
int D_col_stride, Real alpha, Real beta) {
int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B.
int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block
int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block
if (b >= num_blocks)
return;
const CuBlockMatrixData &block_data = B_cu_data[b];
if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols)
return; // we're outside the dimensions of the b'th block.
// B_elem is the element of B we're writing to.
Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data)
+ i * block_data.matrix_dim.stride + j;
Real B_val = *B_elem;
// B_row and B_col are the (row, col) index into the full matrix B.
int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j;
const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data
+ D_col_stride * B_col;
Real sum = 0.0;
for (int k = 0; k < C_num_cols; k++) {
sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride];
}
*B_elem = alpha * sum + beta * B_val;
}
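// Worked note on the transposition convention above: to make this kernel
// compute blocks of, say, C^T * D, the caller passes C's (num_rows, num_cols)
// and (row_stride, col_stride) swapped, so that C_row_data[k * C_col_stride]
// walks what is really a column of the original C. The kernel itself never
// needs to know whether a transpose was requested.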
template<typename Real>
__global__
static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim,
const Real *A_data, int A_num_rows,
int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha,
Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &cu_data = B_cu_data[j];
// BT means B transposed.
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset,
BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols =
cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride;
// Cast from void*: the data pointer is stored untyped so that
// CuBlockMatrixData can remain a plain C struct.
const Real *B_data = static_cast<Real*>(cu_data.matrix_data);
for (int k = 0; k < BT_num_cols; k++) {
const Real *this_BT_col = B_data + k * BT_col_stride;
const Real *this_A_row = A_data + i * A_row_stride
+ BT_row_start * A_col_stride;
// this_A_row points to the element A[i][BT_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
sum += this_BT_col[l] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + BT_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
template<typename Real>
__global__
static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data,
MatrixDim src_dim, const Int32Pair *indices) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= dim.rows || col >= dim.cols)
return;
int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride
+ indices[col].first, src_end_index = row * src_dim.stride
+ indices[col].second;
Real sum = 0.0;
for (int index = src_start_index; index < src_end_index; index++)
sum += src_data[index];
data[dst_index] = sum;
}
template<typename Real>
__global__
static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data,
MatrixDim src_dim, const Int32Pair *indexes) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= dim.rows || col >= dim.cols)
return;
int dst_index = row * dim.stride + col;
int src_index_start = indexes[row].first, src_index_end = indexes[row].second;
for (int row_index = src_index_start; row_index < src_index_end; row_index++)
data[dst_index] += src_data[row_index * src_dim.stride + col];
}
template<typename Real>
__global__
static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
// compute the function y[index] = log(1 + exp(x[index]))
if (i < d.cols && j < d.rows) {
Real val = x[src_index], result;
if (val >= 10.0)
result = val; // function approaches y=x as x gets large
else
result = log1p(exp(val));
y[dst_index] = result;
}
}
template<typename Real>
__global__
static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride,
int group_size, Real power) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols) {
int dst_index = i + j * d.stride;
Real tmp = 0;
int src_begin_index = i * group_size + j * src_stride;
int src_end_index = src_begin_index + group_size;
for (int src_index = src_begin_index; src_index < src_end_index;
src_index++) {
tmp += pow(std::abs(x[src_index]), power);
}
tmp = pow(tmp, Real(1.0 / power));
if (!isnan(tmp)) {
y[dst_index] = tmp;
} else {
Real max_value = x[src_begin_index], min_value = max_value;
for (int src_index = src_begin_index + 1; src_index < src_end_index;
src_index++) {
if (x[src_index] > max_value)
max_value = x[src_index];
if (x[src_index] < min_value)
min_value = x[src_index];
}
tmp = 0.0;
// let max_abs_value be the largest absolute value in the group
Real max_abs_value = (max_value > -min_value ? max_value : -min_value);
if (max_abs_value == 0) {
y[dst_index] = 0.0;
} else {
for (int src_index = src_begin_index; src_index < src_end_index;
src_index++) {
Real x_scaled = x[src_index] / max_abs_value;
tmp += pow(std::abs(x_scaled), Real(power));
}
y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value;
}
}
}
}
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = 1.0 / (1.0 + exp(-x[src_index]));
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d,
int e_stride, int y_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows)
eout[dst_index] = y[y_index] * (1.0 - y[y_index]) * e[e_index];
}
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real exp_2x = exp(2.0 * x[src_index]);
Real res;
if (isinf(exp_2x)) {
res = 1.0;
} else {
res = (exp_2x - 1.0) / (exp_2x + 1.0);
}
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d,
int e_stride, int y_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows)
eout[dst_index] = (1.0 - y[y_index] * y[y_index]) * e[e_index];
}
template<typename Real>
__global__
static void _parametric_relu(Real* y, const Real* x, MatrixDim d, int src_stride,
const Real* a, const Real* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride,
src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = (x[src_index] > 0.0) ? a[i] * x[src_index] : b[i] * x[src_index];
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_parametric_relu(Real* eout, const Real* e, const Real* y,
MatrixDim d, int e_stride, int y_stride,
const Real* a, const Real* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows )
eout[dst_index] = (y[y_index] > 0.0 ? a[i] * e[e_index] : b[i] * e[e_index]);
}
template<typename Real>
__global__
static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = (x[src_index] > 0.0 ? 1.0 : 0.0);
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) {
__shared__ Real smem[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * src_stride;
const int y_start = i * d.stride;
const int tid = threadIdx.x;
// find max element of the row
// reduce to CU1DBLOCK elements per row.
Real tmax = sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tmax = fmax(tmax, x[x_start + j]);
}
smem[tid] = tmax;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
}
// broadcast max to all threads
__syncthreads();
Real max = smem[0];
// sum_j(exp(x(i,j)-max))
// reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tsum += exp(x[x_start + j] - max);
}
smem[tid] = tsum;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] += smem[tid + shift];
}
}
// broadcast sum to all threads
__syncthreads();
Real inv_sum = Real(1) / smem[0];
// normalize the row
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
y[y_start + j] = exp(x[x_start + j] - max) * inv_sum;
}
}
// The output is y_i = scale * x_i,
// and we want the RMS value of the y_i to equal target_rms,
// so y^t y = D * target_rms^2 (if y is one row of the input).
// We need scale = 1.0 / sqrt(x^t x / (D * target_rms^2)).
// There is also flooring involved, to avoid division-by-zero
// problems. It's important for the backprop that the floor's
// square root is exactly representable as a float.
// If add_log_stddev is true, log(max(epsilon, sqrt(x^t x / D)))
// is written as an extra dimension of the output.
//
// A 1D grid is used; each 256-thread block works on one row of the data
// matrix. The block is also 1D. Strided memory access is used if the
// length of the row is longer than 256.
template<typename Real>
__global__
static void _normalize_per_row(Real *y, int y_stride, const Real *x,
MatrixDim x_d, Real target_rms,
bool add_log_stddev) {
const int i = blockIdx.x;
const int tid = threadIdx.x;
const Real* x_row = x + i * x_d.stride;
__shared__ Real ssum[CU1DBLOCK];
// Reduce x_j^2 to CU1DBLOCK elements per row
Real tsum = Real(0);
for (int j = tid; j < x_d.cols; j += CU1DBLOCK) {
tsum += x_row[j] * x_row[j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Reduce last warp to 1 element per row.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66
if (tid == 0) {
ssum[0] = sqrt(
fmax(ssum[0] / (target_rms * target_rms * x_d.cols), kSquaredNormFloor));
}
// Broadcast floored stddev to all threads.
__syncthreads();
const Real stddev_div_target_rms = ssum[0];
const Real scale = Real(1) / stddev_div_target_rms;
// Store normalized input to output
Real* y_row = y + i * y_stride;
for (int j = tid; j < x_d.cols; j += CU1DBLOCK) {
y_row[j] = x_row[j] * scale;
}
if (tid == 0 && add_log_stddev) {
y_row[x_d.cols] = log(stddev_div_target_rms * target_rms);
}
}
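// Hedged reference sketch (not part of the original interface): the per-row
// computation performed by _normalize_per_row above, written as plain host
// code for one row x of length D, to make the scale formula in the comment
// concrete. 'reference_normalize_row' is a made-up name; if add_log_stddev
// is true, y must have D + 1 elements.
template<typename Real>
static void reference_normalize_row(const Real* x, int D, Real target_rms,
                                    bool add_log_stddev, Real* y) {
  const Real kSquaredNormFloor = 1.3552527156068805425e-20;  // 2^-66
  Real sumsq = Real(0);
  for (int j = 0; j < D; j++)
    sumsq += x[j] * x[j];
  Real stddev_div_target_rms =
      sqrt(fmax(sumsq / (target_rms * target_rms * D), kSquaredNormFloor));
  Real scale = Real(1) / stddev_div_target_rms;
  for (int j = 0; j < D; j++)
    y[j] = x[j] * scale;
  if (add_log_stddev)
    y[D] = log(stddev_div_target_rms * target_rms);
}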
template<typename Real>
__global__
static void _diff_normalize_per_row(Real *id, int id_stride, const Real *iv,
MatrixDim iv_dim, const Real* od,
int od_stride, Real target_rms,
bool add_log_stddev) {
const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66
const Real kInvNormFloor = 8589934592.0;
const int tid = threadIdx.x;
const int i = blockIdx.x;
const Real* iv_row = iv + i * iv_dim.stride;
const Real* od_row = od + i * od_stride;
// reduce to CU1DBLOCK elements per row
Real dot_products = Real(0);
Real in_norm = Real(0);
for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) {
const Real iv_ij = iv_row[j];
dot_products += iv_ij * od_row[j];
in_norm += iv_ij * iv_ij;
}
__shared__ Real sprod[CU1DBLOCK];
__shared__ Real snorm[CU1DBLOCK];
sprod[tid] = dot_products;
snorm[tid] = in_norm;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sprod[tid] += sprod[tid + shift];
snorm[tid] += snorm[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
sprod[tid] += sprod[tid + shift];
snorm[tid] += snorm[tid + shift];
}
}
// broadcast the sum results
__syncthreads();
dot_products = sprod[0];
in_norm = snorm[0];
Real log_stddev_deriv;
if (add_log_stddev) {
log_stddev_deriv = Real(1) / max(in_norm, iv_dim.cols * kSquaredNormFloor)
* od_row[iv_dim.cols];
}
const Real inv_d_scaled = Real(1) / (iv_dim.cols * target_rms * target_rms);
in_norm = Real(1) / sqrt(max(in_norm * inv_d_scaled, kSquaredNormFloor));
const Real f = in_norm == kInvNormFloor ? Real(0) : in_norm;
dot_products *= f * f * f * inv_d_scaled;
for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) {
const Real iv_ij = iv_row[j];
Real id_ij = id[i * id_stride + j];
if (add_log_stddev) {
id_ij += log_stddev_deriv * iv_ij;
}
if (id != od) {
id_ij += in_norm * od_row[j];
} else {
id_ij *= in_norm;
}
id_ij -= dot_products * iv_ij;
id[i * id_stride + j] = id_ij;
}
}
// Per-row log-softmax operation on 'x', with writing to 'y'.
// note, x and y may point to the same memory. This is equivalent to setting
// matrix y to matrix x and then, for each row of y, subtracting the offset that
// will make exp(y.row[j]) sum to 1 for each row j.
//
// It expects to be called with CU1DBLOCK threads.
// The number of blocks [i.e. the gridDim] equals y_dim.rows,
// so one block of threads processes each row. x and y are
// expected to have the same dimension, but possibly different row strides.
template<typename Real>
__global__
static void _log_softmax_reduce(Real* y, const Real* x, MatrixDim y_dim,
int x_stride) {
__shared__ Real smem[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * x_stride;
const int y_start = i * y_dim.stride;
const int tid = threadIdx.x;
// find max element of the row
// reduce to CU1DBLOCK elements per row.
Real tmax = -1e20;
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
tmax = fmax(tmax, x[x_start + j]);
}
smem[tid] = tmax;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
}
// broadcast max to all threads
__syncthreads();
Real max = smem[0];
// sum_j(exp(x(i,j)-max))
// reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
tsum += exp(x[x_start + j] - max);
}
smem[tid] = tsum;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] += smem[tid + shift];
}
}
// broadcast sum to all threads
__syncthreads();
Real log_sum = log(smem[0]);
// normalize the row
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
y[y_start + j] = x[x_start + j] - max - log_sum;
}
}
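// Hedged reference sketch (illustrative only): the per-row computation of
// _log_softmax_reduce above, as plain host code for a single row x of length
// D (D >= 1). The max is subtracted before exponentiating for numerical
// stability, exactly as in the kernel. 'reference_log_softmax_row' is a
// made-up name for this sketch.
template<typename Real>
static void reference_log_softmax_row(const Real* x, int D, Real* y) {
  Real max = x[0];
  for (int j = 1; j < D; j++)
    max = (x[j] > max ? x[j] : max);
  Real sum = Real(0);
  for (int j = 0; j < D; j++)
    sum += exp(x[j] - max);
  Real log_sum = log(sum);
  for (int j = 0; j < D; j++)
    y[j] = x[j] - max - log_sum;
}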
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if (src_row < 0)
src_row = 0;
if (src_row >= d_in.rows)
src_row = d_in.rows - 1;
y[index] = x[src_col + src_row * d_in.stride];
}
}
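// Indexing example for _splice above: with d_in.cols == 40 and
// off == {-2, 0, 2} (so d_out.cols == 120), output column i == 95 reads input
// column 95 % 40 == 15 of input row j + off[95 / 40] == j + off[2] == j + 2,
// with the source row clamped to [0, d_in.rows - 1] at the edges.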
template<typename Real>
__global__
static void _take_mean(const Real* x, Real* y, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index1 = i + j * d_in.stride;
int32_cuda index2 = j + i * d_in.stride;
if (i <= j && j < d_in.rows) {
int32_cuda index_sp = (j * (j + 1) / 2) + i;
y[index_sp] = 0.5 * (x[index1] + x[index2]);
}
}
template<typename Real>
__global__
static void _take_lower(const Real* x, Real* y, MatrixDim d_in) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index
if (j > i || i >= d_in.rows)
return;
int index = i * d_in.stride + j;
Real val = x[index];
int index_sp = (i * (i + 1) / 2) + j;
y[index_sp] = val;
}
template<typename Real>
__global__
static void _take_upper(const Real* x, Real* y, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index
if (j < i || j >= d_in.rows)
return;
int32_cuda index = i * d_in.stride + j;
int32_cuda index_sp = (j * (j + 1) / 2) + i;
y[index_sp] = x[index];
}
template<typename Real>
__global__
static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
y[i] = x[index];
}
}
template<typename Real>
__global__
static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dim.cols && j < dim.rows) {
int dst_index = i + j * dim.stride, src_index;
if (j <= i) { // no transpose
src_index = (i * (i + 1) / 2) + j;
} else { // transpose.
src_index = (j * (j + 1) / 2) + i;
}
y[dst_index] = x[src_index];
}
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_col = copy_from[i];
if (src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j * d_in.stride];
} else {
y[index] = 1.0 / 0.0;  // source column out of range: write +inf
}
}
}
template<typename Real>
__global__
static void _one(Real* x, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
x[i] = 1.0;
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row * d_in.stride];
}
}
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d,
int stride_grad) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride, grad_index = i + j * stride_grad;
if (i < d.cols && j < d.rows) {
if (wei[index] == 0.0)
return; //skip L1 if zero weight!
Real l1_signed = l1;
if (wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
//simulate update
Real after = wei[index] - lr * grad[grad_index] - l1_signed;
if ((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[grad_index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
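// Worked example for _regularize_l1 above: wei = 0.3, lr * grad = 0.25,
// l1 = 0.1 gives the simulated update 0.3 - 0.25 - 0.1 = -0.05; the sign
// flips, so both the weight and its gradient are zeroed. With lr * grad = 0.1
// the simulated value 0.1 stays positive, so only the shrinkage is applied,
// leaving wei = 0.3 - 0.1 = 0.2 (the lr * grad term here is used only for
// the sign test; this kernel itself applies just the L1 shrinkage).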
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id,
MatrixDim d) {
const int32_cuda i = blockIdx.x;
const int32_cuda base = i * d.stride;
const int32_cuda tid = threadIdx.x;
__shared__ Real smax[CU1DBLOCK];
__shared__ int32_cuda sidx[CU1DBLOCK];
Real tmax = -1e20;
int32_cuda tidx = -1;
// Loop over blocks for coalesced memory access.
for (int32_cuda j = tid; j < d.cols; j += CU1DBLOCK) {
const Real val = mat[base + j];
if (val > tmax) {
tmax = val;
tidx = j;
}
}
smax[tid] = tmax;
sidx[tid] = tidx;
// Parallel reduce
#pragma unroll
for (int32_cuda num_working_threads = CU1DBLOCK / 2;
num_working_threads >= warpSize; num_working_threads >>= 1) {
__syncthreads();
if (tid < num_working_threads) {
if (smax[tid + num_working_threads] > smax[tid]) {
smax[tid] = smax[tid + num_working_threads];
sidx[tid] = sidx[tid + num_working_threads];
}
}
}
// Warp reduce without __syncthreads()
// (note: threads are implicitly synchronized within a warp).
if (tid < warpSize / 2) {
#pragma unroll
for (int32_cuda num_working_threads = warpSize / 2; num_working_threads > 0;
num_working_threads >>= 1) {
if (smax[tid + num_working_threads] > smax[tid]) {
smax[tid] = smax[tid + num_working_threads];
sidx[tid] = sidx[tid + num_working_threads];
}
}
}
if (tid == 0) {
if (vec_val) {
vec_val[i] = smax[0];
}
vec_id[i] = sidx[0];
}
}
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out,
Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 0)
  return;  // only the first column of threads does work; one thread per row j
if (j < d.rows) {
int32_cuda index = vec_tgt[j] + j * d.stride;
Real value = mat_net_out[index];
if (value < 1e-20)
value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
template<typename Real>
__global__
static void _diff_softmax(Real* x, const MatrixDim dim, const Real* value,
const int value_stride, const Real* diff,
const int diff_stride) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int value_start = i * value_stride;
const int diff_start = i * diff_stride;
const int x_start = i * dim.stride;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < dim.cols; j += CU1DBLOCK) {
tsum += value[value_start + j] * diff[diff_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// Broadcast result to all threads
__syncthreads();
const Real pe = ssum[0];
// Apply element-wise x = value * (diff - pe)
for (int j = tid; j < dim.cols; j += CU1DBLOCK) {
x[x_start + j] = value[value_start + j] * (diff[diff_start + j] - pe);
}
}
// Differentiate backward through the log softmax function.
// "out_value" is the log softmax output. For each row i this computes
// in_deriv(i) = out_deriv(i) - sum(out_deriv(i)) .* exp(out_value(i)),
// where (i) denotes row i (a row vector).
// CUDA thread layout: 1 thread block (CU1DBLOCK == 256 threads) per matrix-row.
template<typename Real>
__global__
static void _diff_log_softmax(const MatrixDim in_deriv_dim,
const Real* out_value, const int out_value_stride,
const Real* out_deriv, const int out_deriv_stride,
Real* in_deriv) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int out_value_start = i * out_value_stride;
const int out_deriv_start = i * out_deriv_stride;
const int in_deriv_start = i * in_deriv_dim.stride;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) {
tsum += out_deriv[out_deriv_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// Broadcast result to all threads
__syncthreads();
const Real sum_e = ssum[0];
// Apply element-wise x = out_deriv - exp(value) * sum_e
for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) {
in_deriv[in_deriv_start + j] = out_deriv[out_deriv_start + j]
- exp(out_value[out_value_start + j]) * sum_e;
}
}
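// Derivation sketch for the formula in the comment above: if y = LogSoftmax(z)
// then dy_i/dz_j = delta_ij - exp(y_j), so
//   dE/dz_j = dE/dy_j - exp(y_j) * sum_i dE/dy_i,
// which is exactly the per-element update in the loop above ('sum_e' is the
// row-sum of out_deriv computed by the block-wide reduction).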
/**
this function computes the core part of the LSTM nonlinearity.
@param [in] in A matrix, of dimension num_rows by 5*cell_dim
(i.e. its num-cols must be a multiple of 5).
The column-space is interpreted as 5
consecutive blocks, each of dimension cell_dim,
which we name:
(i_part, f_part, c_part, o_part, c_{t-1}).
If 'have_dropout_mask' is nonzero, each row of
'in' will have 3 extra elements, interpreted
as dropout masks/scales for i_t, f_t and o_t.
@param [in] params A matrix, of dimension 3 by cell_dim,
with rows containing the 3 diagonal parameter matrices
used in LSTMs, namely
w_{ic}, w_{fc} and w_{oc}.
@param [out] out A matrix, of dimension num_rows by 2*cell_dim.
The quantities c_t and m_t respectively are put there
(in two blocks of column-dimension cell_dim),
according to the following equations:
i_t = Sigmoid(i_part + w_{ic}*c_{t-1})
f_t = Sigmoid(f_part + w_{fc}*c_{t-1})
c_t = f_t*c_{t-1} + i_t * Tanh(c_part)
o_t = Sigmoid(o_part + w_{oc}*c_t)
m_t = o_t * Tanh(c_t)
   We use a 1D thread block with CU1DBLOCK threads.
   It works best when cell_dim is a multiple of CU1DBLOCK.
   We use a 1D grid; each block works on one row of the 'in' and 'out' matrices.
*/
template<typename Real>
__global__
static void _lstm_nonlinearity(const Real* in, const int in_stride,
const Real* params, const int params_stride,
const int out_stride, const int cell_dim,
const int have_dropout_mask, const int num_rows,
Real* out) {
const int tid = threadIdx.x;
const int i = blockIdx.x;
const Real* i_part = in + i * in_stride;
const Real* f_part = in + i * in_stride + cell_dim;
const Real* c_part = in + i * in_stride + cell_dim * 2;
const Real* o_part = in + i * in_stride + cell_dim * 3;
const Real* c_tm1 = in + i * in_stride + cell_dim * 4;
const Real* w_ic = params;
const Real* w_fc = params + params_stride;
const Real* w_oc = params + params_stride * 2;
Real* c_t = out + i * out_stride;
Real* m_t = out + i * out_stride + cell_dim;
Real i_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5] : 1),
f_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 1] : 1),
o_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 2] : 1);
for (int j = tid; j < cell_dim; j += CU1DBLOCK) {
Real c_tm1_j = c_tm1[j];
Real i_t_j = Real(1) / (Real(1) + exp(-i_part[j] - w_ic[j] * c_tm1_j));
Real f_t_j = Real(1) / (Real(1) + exp(-f_part[j] - w_fc[j] * c_tm1_j));
Real c_t_j = f_t_j * f_scale * c_tm1_j + i_t_j * i_scale * tanh(c_part[j]);
Real o_t_j = Real(1) / (Real(1) + exp(-o_part[j] - w_oc[j] * c_t_j));
c_t[j] = c_t_j;
m_t[j] = o_t_j * o_scale * tanh(c_t_j);
}
}
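// Illustrative launch sketch for _lstm_nonlinearity (an assumption for
// exposition only; the real launch is done by the corresponding host-side
// wrapper):
//
//   dim3 Gr(num_rows);   // 1-D grid, one block per row of 'in' / 'out'
//   dim3 Bl(CU1DBLOCK);  // threads of a block stride over cell_dim
//   _lstm_nonlinearity<<<Gr, Bl>>>(in, in_stride, params, params_stride,
//                                  out_stride, cell_dim, have_dropout_mask,
//                                  num_rows, out);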
/**
This function does the 'backward' pass corresponding to the function
ComputeLstmNonlinearity. It's a little more complicated than you might
expect because of the 'self-repair' mechanism that we use to prevent the
sigmoid and tanh nonlinearities oversaturating, and because of the
average-activation and average-derivative stats that we store for these
   nonlinearities (these stats are used both to control the self-repair
mechanism, and for diagnostic purposes).
Because the forward pass computes various intermediate values that are not
output, this function actually has to do the same computations as the
forward pass before it actually does the backprop.
In the following description, `C` is for `cell_dim`, `N` is for `num_rows`.
@param [in] input The same as in ComputeLstmNonlinearity().
A matrix, of dimension N by 5C (i.e. its num-cols must be
a multiple of 5). The column-space is interpreted as 5
consecutive blocks, each of dimension C, which we name:
(i_part, f_part, c_part, o_part, c_{t-1}).
If 'have_dropout_mask' is nonzero, each row of
'in' will have 3 extra elements, interpreted
as dropout masks/scales for i_t, f_t and o_t.
@param [in] params The same as in ComputeLstmNonlinearity().
A matrix, of dimension 3 by C, with rows containing the
three diagonal parameter matrices used in LSTMs, namely
w_{ic}, w_{fc} and w_{oc}.
@param [in] output_deriv
A matrix, of dimension N by 2C, containing the derivative
of the objective function we're backpropagating,
w.r.t. the quantities c_t and m_t (in two blocks of
column-dimension C).
@param [in] deriv_sum_in
This is used in the self-repair code to identify
oversaturated nonlinearities.
It is a matrix, of dimension 5 by C, corresponding to
the totals of the derivatives of the 5 sigmoid and tanh
                              nonlinearities, in the order they appear in the
                              equations in the documentation of
                              ComputeLstmNonlinearity(), i.e. the equations for
                              (i_t, f_t, c_t, o_t, m_t).
This will be divided by 'count_in' to get the average
derivative value so far, for each of the nonlinearities.
@param [in] self_repair_config
A vector of dimension 10, containing the configuration of
the self-repair to be used for the 5 nonlinearities.
The first 5 elements are the self_repair_lower_threshold
values (typically 0.05 for sigmoid and 0.2 for tanh),
and the next 5 elements are the corresponding
self-repair-scales (typically 10^-5).
@param [in] count_in The data-count that corresponds to the stats in
'deriv_sum_in' at entry to the function.
This function should tolerate the count being zero
(in that case, it is free to do the self-repair or not,
as this should only happen on the 1st minibatch of each
training job).
@param [out] input_deriv
May be NULL; if not, this function writes, to this
location, the backpropagated derivative of the objective
function w.r.t. the 'input' matrix. This matrix should
have the same dimension as 'input' i.e. N by 5C. In
addition to the regular backpropagated derivative, the
output will include small values relating to 'self-repair'.
@param [out] params_deriv
May be NULL; if not, this is where this function *writes*
[not adds] the backpropagated derivative of the objective
function w.r.t. 'params'; it should have the same dimension
as 'params' (3 by C). (This matrix will then be processed
by the natural gradient code and added to the appropriate
copy of the parameter matrix, outside this function).
@param [out] value_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C. This function *adds* to this location
the total value of each of the sigmoid/tanh nonlinearities
that it computes (this is for diagnostic purposes).
@param [out] deriv_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C; this function *adds* to this location the
total of the derivative of each of the sigmoid/tanh
nonlinearities that it computes (this is for diagnostic
purposes and to control the self-repair). This function
should tolerate the case when 'deriv_sum_out' points to the
same data as 'deriv_sum_in'.
@param [out] self_repair_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C; this function *writes* to this location
the sum of the number of times the self-repair code was
activated (integer values 0 <= k <= N). This will be
processed outside this function into self-repair stats for
diagnostics.
  // Use a 2-D block (8x32 threads) since we need to compute column sums.
  // Use a 1-D grid to cover the cell_dim columns of the data matrix.
*/
template<typename Real>
__global__
static void _diff_lstm_nonlinearity(const int cell_dim, const int have_dropout_mask,
const int num_rows,
const Real* input, const int input_stride,
const Real* params, const int params_stride,
const Real* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const Real* self_repair_config,
double count, Real* input_deriv,
const int input_deriv_stride,
Real* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
Real* self_repair_sum_out,
const int self_repair_sum_out_stride) {
__shared__ Real smem[CU1DBLOCK];
const int j = blockIdx.x * blockDim.x + threadIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int grid_stride = gridDim.y * blockDim.y;
const int i0 = blockIdx.y * blockDim.y + threadIdx.y;
Real w_ic_deriv_sum = 0;
Real w_fc_deriv_sum = 0;
Real w_oc_deriv_sum = 0;
Real i_t_value_sum = 0, i_t_deriv_sum = 0;
Real f_t_value_sum = 0, f_t_deriv_sum = 0;
Real c_part_value_sum = 0, c_part_deriv_sum = 0;
Real o_t_value_sum = 0, o_t_deriv_sum = 0;
Real c_t_value_sum = 0, c_t_deriv_sum = 0;
bool update_sr[5];
if (j < cell_dim) {
const Real w_ic = params[j];
const Real w_fc = params[params_stride + j];
const Real w_oc = params[2 * params_stride + j];
const Real* sr_config = self_repair_config;
# pragma unroll
for (int i = 0; i < 5; i++) {
update_sr[i] =
deriv_sum_in[i * deriv_sum_in_stride + j] < sr_config[i] * count;
}
const Real i_t_self_repair = (update_sr[0] ? sr_config[5] : 0);
const Real f_t_self_repair = (update_sr[1] ? sr_config[6] : 0);
const Real c_part_self_repair = (update_sr[2] ? sr_config[7] : 0);
const Real o_t_self_repair = (update_sr[3] ? sr_config[8] : 0);
const Real c_t_self_repair = (update_sr[4] ? sr_config[9] : 0);
for (int i = i0; i < num_rows; i += grid_stride) {
const Real i_part = input[i * input_stride + j];
const Real f_part = input[i * input_stride + j + cell_dim];
const Real c_part = input[i * input_stride + j + 2 * cell_dim];
const Real o_part = input[i * input_stride + j + 3 * cell_dim];
const Real c_prev = input[i * input_stride + j + 4 * cell_dim];
const Real i_scale = (have_dropout_mask ?
input[i * input_stride + cell_dim * 5] : 1),
f_scale = (have_dropout_mask ?
input[i * input_stride + cell_dim * 5 + 1] :1),
o_scale = (have_dropout_mask ?
input[i * input_stride + cell_dim * 5 + 2] :1);
const Real i_t = Real(1) / (1 + exp(-i_part - w_ic * c_prev));
const Real f_t = Real(1) / (1 + exp(-f_part - w_fc * c_prev));
const Real tanh_c_part = tanh(c_part);
const Real c_t = f_t * f_scale * c_prev + i_t * i_scale * tanh_c_part;
const Real o_t = 1 / (1 + exp(-o_part - w_oc * c_t));
const Real tanh_c_t = tanh(c_t);
const Real i_t_deriv = i_t * (1 - i_t);
const Real f_t_deriv = f_t * (1 - f_t);
const Real c_part_deriv = 1 - tanh_c_part * tanh_c_part;
const Real o_t_deriv = o_t * (1 - o_t);
const Real c_t_deriv = 1 - tanh_c_t * tanh_c_t;
if (params_deriv) {
i_t_value_sum += i_t;
f_t_value_sum += f_t;
c_part_value_sum += tanh_c_part;
o_t_value_sum += o_t;
c_t_value_sum += tanh_c_t;
i_t_deriv_sum += i_t_deriv;
f_t_deriv_sum += f_t_deriv;
c_part_deriv_sum += c_part_deriv;
o_t_deriv_sum += o_t_deriv;
c_t_deriv_sum += c_t_deriv;
}
const Real dc_t_out = output_deriv[i * output_deriv_stride + j];
const Real dm_t = output_deriv[i * output_deriv_stride + j + cell_dim];
const Real dtanh_c_t = o_t * o_scale * dm_t;
const Real do_t = o_scale * tanh_c_t * dm_t;
const Real do_t_input = (o_t_deriv * do_t
- (2 * o_t - 1) * o_t_self_repair);
const Real dc_t = (c_t_deriv * dtanh_c_t + dc_t_out + do_t_input * w_oc)
- tanh_c_t * c_t_self_repair;
const Real dtanh_c_part = i_t * i_scale * dc_t;
const Real df_t = dc_t * f_scale * c_prev;
const Real df_t_input = (df_t * f_t_deriv
- (2 * f_t - 1) * f_t_self_repair);
const Real di_t = dc_t * i_scale * tanh_c_part;
const Real di_t_input = (di_t * i_t_deriv
- (2 * i_t - 1) * i_t_self_repair);
if (params_deriv) {
w_ic_deriv_sum += c_prev * di_t_input;
w_fc_deriv_sum += c_prev * df_t_input;
w_oc_deriv_sum += c_t * do_t_input;
}
const Real dc_prev = w_ic * di_t_input + w_fc * df_t_input + f_t * f_scale * dc_t;
const Real do_part = do_t_input;
const Real dc_part = (c_part_deriv * dtanh_c_part
- tanh_c_part * c_part_self_repair);
const Real df_part = df_t_input;
const Real di_part = di_t_input;
if (input_deriv) {
input_deriv[i * input_deriv_stride + j] = di_part;
input_deriv[i * input_deriv_stride + j + cell_dim] = df_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 2] = dc_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 3] = do_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 4] = dc_prev;
}
}
}
if (params_deriv) {
// compute params_deriv
smem[tid] = w_ic_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[j] = smem[tid];
}
__syncthreads();
smem[tid] = w_fc_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[params_deriv_stride + j] = smem[tid];
}
__syncthreads();
smem[tid] = w_oc_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[2 * params_deriv_stride + j] = smem[tid];
}
// compute value_sum_out
__syncthreads();
smem[tid] = i_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[j] += smem[tid];
}
__syncthreads();
smem[tid] = f_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_part_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[2 * value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = o_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[3 * value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[4 * value_sum_out_stride + j] += smem[tid];
}
// need to update self_repair_sum_out before deriv_sum_out, because
// deriv_sum_out and deriv_sum_in might point to the same memory.
if (i0 < 5 && j < cell_dim) {
self_repair_sum_out[i0 * self_repair_sum_out_stride + j] =
update_sr[i0] ? num_rows : 0;
}
    // compute deriv_sum_out
__syncthreads();
smem[tid] = i_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[j] += smem[tid];
}
__syncthreads();
smem[tid] = f_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_part_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[2 * deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = o_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[3 * deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_t_deriv_sum;
__syncthreads();
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[4 * deriv_sum_out_stride + j] += smem[tid];
}
}
}
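// Note on the shared-memory reductions above: the indexing assumes
// blockDim.x == warpSize (so the "8x32" block from the header comment is 32
// threads wide and CU1DBLOCK / warpSize deep).  Each thread first accumulates
// its partial column sums over the rows it visits; every tree step then adds
// the partial sums of threads with the same threadIdx.x (same column j) but a
// larger threadIdx.y, so after the loop the first warp (tid < warpSize) holds
// the per-column totals that are written to params_deriv, value_sum_out and
// deriv_sum_out.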
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
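/*
 * Naming convention used by the wrappers below (descriptive note):
 * cudaF_* operate on float data, cudaD_* on double; cudaFD_* / cudaDF_* are
 * mixed-precision copies with the destination type named first.  Gr and Bl
 * are the grid and block dimensions chosen by the caller.
 */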
/*
* "int32"
*/
void cuda_int32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value,
MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cuda_int32_add(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value,
MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cuda_int32_sequence(dim3 Gr, dim3 Bl, int32_cuda* data, int length,
int32_cuda base) {
_sequence<<<Gr, Bl>>>(data, length, base);
}
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {
  _copy_upp_low<<<Gr,Bl>>>(A,dimA);
}
void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {
  _copy_low_upp<<<Gr,Bl>>>(A,dimA);
}
void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat,
MatrixDim mat_dim, const float *vec,
const float *mat2, int mat2_row_stride,
int mat2_col_stride, float beta) {
_add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B,
MatrixDim dmat) {
_copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B,
MatrixDim dmat) {
_copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B,
MatrixDim dmat) {
_copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B,
MatrixDim dmat) {
_copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) {
_apply_pow<<<Gr,Bl>>>(mat, power, d);
}
void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power,
bool include_sign, MatrixDim d) {
_apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d);
}
void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_heaviside<<<Gr,Bl>>>(mat, d);
}
void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst,
const float* const * src, MatrixDim dst_dim) {
_copy_rows<<<Gr,Bl>>>(dst, src, dst_dim);
}
void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const * dst,
const float* src, MatrixDim src_dim) {
_copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim);
}
void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst,
const float* const * src, MatrixDim dst_dim) {
_add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim);
}
void cudaF_add_to_rows(dim3 Gr, dim3 Bl, float alpha,
float* dst, const float* src, const MatrixIndexT_cuda* reorder,
MatrixDim src_dim, int dst_stride) {
_add_to_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, src_dim, dst_stride);
}
void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const * dst,
const float* src, MatrixDim src_dim) {
_add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val,
MatrixDim d) {
_apply_floor<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val,
MatrixDim d) {
_apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d);
}
void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) {
_set_diag<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
_set_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
_add_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_set_zero_above_diag<<<Gr,Bl>>>(mat, d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
_scale_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A,
MatrixDim dst_d, int src_stride) {
_mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A,
MatrixDim dst_d, int src_stride) {
_div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d,
int src_stride) {
_max<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaF_min(dim3 Gr, dim3 Bl, float* mat, const float* other,
MatrixDim mat_d, int other_stride) {
_min<<<Gr,Bl>>>(mat,other,mat_d,other_stride);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale,
MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale,
MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x,
MatrixDim d, int src_stride, int group_size) {
_mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size);
}
void cudaF_diff_group_pnorm(dim3 Gr, dim3 Bl, float *id, const float *iv,
const float *ov, const float* od, MatrixDim id_dim,
int iv_stride, int ov_stride, int od_stride,
int group_size, float power) {
_diff_group_pnorm<<<Gr, Bl>>>(id, iv, ov, od, id_dim, iv_stride, ov_stride,
od_stride, group_size, power);
}
void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1,
const float *x2, MatrixDim y_dim, int x1_stride,
int x2_stride, int group_size) {
_calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, y_dim, x1_stride, x2_stride,
group_size);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div,
MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst,
MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
_add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
} else {
_add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
}
}
void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src,
int32_cuda num_row_blocks, int32_cuda num_col_blocks,
float* dst, MatrixDim d, int src_stride,
int A_trans) {
if (A_trans) {
_add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks,
dst, d, src_stride);
} else {
_add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst,
d, src_stride);
}
}
void cudaF_add_mat_repeated(dim3 Gr, dim3 Bl, float alpha, const float* src,
MatrixDim src_dim, float *dst, MatrixDim dst_dim) {
_add_mat_repeated<<<Gr,Bl>>>(alpha, src, src_dim, dst, dst_dim);
}
void cudaF_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B,
const float *C, float *dst, MatrixDim d,
int stride_a, int stride_b, int stride_c) {
_set_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d, stride_a, stride_b, stride_c);
}
void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T,
MatrixDim tdim, float *S, MatrixDim sdim) {
_sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col,
float beta, float* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row,
float beta, float* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat,
MatrixDim mat_dim, const float *mat2,
int mat2_row_stride, int mat2_col_stride,
const float *vec, float beta) {
_add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data,
const float *srcA_data, const float *srcB_data,
MatrixDim dim, int srcA_stride, int srcB_stride,
float alpha, float beta) {
_add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim,
srcA_stride, srcB_stride, alpha, beta);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask,
MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaF_max_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<MAX,float>());
}
void cudaF_min_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<MIN,float>());
}
void cudaF_sum_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<SUM,float>());
}
void cudaF_add_col_sum_mat(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d, const float alpha,
const float beta) {
_transform_reduce_mat_cols<<<Gr, Bl>>>(result, mat, d,
TransReduceOp<SUMAB, float>(alpha, beta));
}
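// The four column-reduction wrappers above all dispatch to the same generic
// _transform_reduce_mat_cols kernel; the TransReduceOp<...> functor selects
// the reduction performed per column (MAX, MIN, plain SUM, or the
// alpha/beta-weighted sum for SUMAB).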
void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig,
float changed) {
_replace_value<<<Gr,Bl>>>(v, dim, orig, changed);
}
void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a,
float param_1, float param_2, float param_3,
int* flag, int dim) {
_set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim);
}
void cublas_copy_kaldi_fd(int Gr, int Bl, int n, const float* x, int incx,
double* y, int incy) {
_cublas_copy_kaldi<<<Gr,Bl>>>(n, x, incx, y, incy);
}
void cublas_copy_kaldi_df(int Gr, int Bl, int n, const double* x, int incx,
float* y, int incy) {
_cublas_copy_kaldi<<<Gr,Bl>>>(n, x, incx, y, incy);
}
void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) {
_vec_mul_elements<<<Gr,Bl>>>(v, a, dim);
}
void cudaF_vec_min(int Gr, int Bl, const float* v, float* value, int dim,
int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<MIN, float>());
}
void cudaF_vec_max(int Gr, int Bl, const float* v, float* value, int dim,
int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<MAX, float>());
}
void cudaF_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const float* A, const float* B,
MatrixDim dA, int B_stride, float* value) {
_trace_mat_mat_trans<<<Gr,Bl>>>(A,B,dA,B_stride,value);
}
void cudaF_trace_mat_mat(dim3 Gr, dim3 Bl, const float* A, const float* B,
MatrixDim dA, int B_stride, float* value) {
_trace_mat_mat<32> <<<Gr,Bl>>>(A,B,dA,B_stride,value);
}
void cudaF_add_diag_mat_mat_MNT(int Gr, int Bl, const float alpha,
const float* M, const MatrixDim dim_M,
const float* N, const int stride_N,
const float beta, float* v) {
_add_diag_mat_mat_MNT<<<Gr,Bl>>>(alpha,M,dim_M,N,stride_N,beta,v);
}
void cudaF_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const float alpha,
const float* M, const int stride_M,
const float* N, const MatrixDim dim_N,
const float beta, float* v) {
if (Bl.x == 16) {
    _add_diag_mat_mat_MTN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
  } else if (Bl.x == 32) {
    _add_diag_mat_mat_MTN<32> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
}
}
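// Note: the dispatch above (and the MN / double-precision variants below)
// only handles Bl.x == 16 or Bl.x == 32, matching the compile-time tile width
// of the templated kernels; a launch with any other block width falls through
// both branches and silently does nothing.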
void cudaF_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const float alpha,
const float* M, const int stride_M,
const float* N, const MatrixDim dim_N,
const float beta, float* v) {
if (Bl.x == 16) {
    _add_diag_mat_mat_MN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
  } else if (Bl.x == 32) {
    _add_diag_mat_mat_MN<32> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x,
const float* y, float beta, int dim) {
_add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim);
}
void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<SUM, float>());
}
void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
float alpha, MatrixElement<float>* x,
int num_elements) {
_cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements);
}
void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim,
float alpha, const Int32Pair* indices,
const float* x, int s, float* data) {
_cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data);
}
void cudaF_matrix_add_to_elements(dim3 Gr, dim3 Bl, float alpha,
float* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
_cuda_matrix_add_to_elements<<<Gr, Bl>>>(alpha, mat, dim, elements);
}
void cudaF_vector_copy_elements(dim3 Gr, dim3 Bl, float *data, int dim,
const float *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
_cuda_vector_copy_elements<<<Gr, Bl>>>(data, dim, src_mat, mat_stride,
transpose, elements);
}
void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s,
const float* z, MatrixDim d, float* z2, MatrixDim d2,
float* t) {
_cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t);
}
void cudaD_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<double>* x, int s,
const double* z, MatrixDim d, double* z2,
MatrixDim d2, double* t) {
_cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t);
}
void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst,
const float *src, int dim) {
_vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim);
}
void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val,
float *count, int dim) {
_vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim);
}
void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val,
float *count, int dim) {
_vec_apply_ceiling<<<Gr,Bl>>>(v, ceiling_val,count,dim);
}
void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) {
_vec_apply_exp<<<Gr,Bl>>>(v,dim);
}
void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) {
_vec_apply_log<<<Gr,Bl>>>(v,flag,dim);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d,
const float *Adata, int A_num_rows, int A_num_cols,
int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, float alpha, float beta,
int B_trans) {
if (B_trans) {
_add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
} else {
_add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
}
}
void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data,
int num_blocks, const float *C_data,
int C_num_cols, int C_row_stride, int C_col_stride,
const float *D_data, int D_row_stride,
int D_col_stride, float alpha, float beta) {
_block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha,
beta);
}
/*
* cu::
*/
void cudaF_soft_hinge(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
_soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d,
int src_stride, int group_size, float power) {
_group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power);
}
void cudaF_group_spec_pnorm(dim3 Gr, dim3 Bl, float* y, const float* x,
MatrixDim d, int src_stride, int group_size,
float power) {
if (power == float(0)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L0NORM, float>());
} else if (power == float(1)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L1NORM, float>());
} else if (power == float(2)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L2NORM, float>());
} else if (power == std::numeric_limits<float>::infinity()) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<LINFNORM, float>());
} else {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<LPNORM, float>(power));
}
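// The dispatch above picks a specialized group reduction for the common cases
// p = 0, 1, 2 and p = +infinity, and falls back to the generic Lp reduction
// otherwise; each output element is the p-norm of one group of 'group_size'
// consecutive inputs (y = (sum_j |x_j|^p)^(1/p) for finite nonzero p).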
}
void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d,
int src_stride, int group_size) {
_group_transform_reduce<<<Gr,Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<MAX, float>());
}
void cudaF_sigmoid(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
_sigmoid<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_diff_sigmoid(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride,
int y_stride) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaF_tanh(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
_tanh<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_diff_tanh(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride, int y_stride) {
_diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaF_parametric_relu(dim3 Gr, dim3 Bl, float* y, const float* x,
MatrixDim d, int src_stride,
const float* a, const float* b) {
_parametric_relu<<<Gr,Bl>>>(y, x, d, src_stride, a, b);
}
void cudaF_diff_parametric_relu(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride,
int y_stride, const float* a, const float* b) {
_diff_parametric_relu<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride, a, b);
}
void cudaF_heaviside(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
_heaviside<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x,
MatrixDim d, int src_stride) {
_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_log_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x,
MatrixDim y_dim, int x_stride) {
_log_softmax_reduce<<<Gr,Bl>>>(y, x, y_dim, x_stride);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaF_normalize_per_row(size_t Gr, size_t Bl, float *y, int y_stride,
const float *x, MatrixDim x_d, float target_rms,
bool add_log_stddev) {
_normalize_per_row<<<Gr, Bl>>>(y, y_stride, x, x_d, target_rms, add_log_stddev);
}
void cudaF_one(int Gr, int Bl, float* x, int dim) {
_one<<<Gr,Bl>>>(x,dim);
}
void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
_take_mean<<<Gr,Bl>>>(x,y,d_in);
}
void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
_take_lower<<<Gr,Bl>>>(x,y,d_in);
}
void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
_take_upper<<<Gr,Bl>>>(x,y,d_in);
}
void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim dim) {
_copy_from_sp<<<Gr,Bl>>>(x, y, dim);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* copy_from, MatrixDim d_out,
MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1,
float lr, MatrixDim d, int stride_grad) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val,
int32_cuda* vec_id, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt,
float* mat_net_out, float* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaF_diff_softmax(dim3 Gr, dim3 Bl, float* x, const MatrixDim dim,
const float* value, const int value_stride,
const float* diff, const int diff_stride) {
_diff_softmax<<<Gr, Bl>>>(x, dim, value, value_stride, diff, diff_stride);
}
void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out,
const float *v_in) {
_copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in);
}
void cudaF_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim,
const float* out_value, const int out_value_stride,
const float* out_deriv, const int out_deriv_stride,
float* in_deriv) {
_diff_log_softmax<<<Gr, Bl>>>(in_deriv_dim, out_value, out_value_stride,
out_deriv, out_deriv_stride, in_deriv);
}
void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col,
const float* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col,
const float* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
const float *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
_sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices);
}
void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
const float *src_data, MatrixDim src_dim,
const Int32Pair *indexes) {
_add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes);
}
void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
float *output) {
_matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output);
}
void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1,
const float *mat2, float *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
_equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride,
mask_stride);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {
  _copy_upp_low<<<Gr,Bl>>>(A,dimA);
}
void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {
  _copy_low_upp<<<Gr,Bl>>>(A,dimA);
}
void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat,
MatrixDim mat_dim, const double *vec,
const double *mat2, int mat2_row_stride,
int mat2_col_stride, double beta) {
_add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B,
MatrixDim dmat) {
_copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B,
MatrixDim dmat) {
_copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B,
MatrixDim dmat) {
_copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B,
MatrixDim dmat) {
_copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) {
_apply_pow<<<Gr,Bl>>>(mat, power, d);
}
void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power,
bool include_sign, MatrixDim d) {
_apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d);
}
void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_heaviside<<<Gr,Bl>>>(mat, d);
}
void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst,
const double* const * src, MatrixDim dst_dim) {
_copy_rows<<<Gr,Bl>>>(dst, src, dst_dim);
}
void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const * dst,
const double* src, MatrixDim src_dim) {
_copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim);
}
void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst,
const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim dst_dim, int src_stride) {
_add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst,
const double* const * src, MatrixDim dst_dim) {
_add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim);
}
void cudaD_add_to_rows(dim3 Gr, dim3 Bl, double alpha,
double* dst, const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim src_dim, int dst_stride) {
_add_to_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, src_dim, dst_stride);
}
void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha,
double* const * dst, const double* src,
MatrixDim src_dim) {
_add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val,
MatrixDim d) {
_apply_floor<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val,
MatrixDim d) {
_apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d);
}
void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) {
_set_diag<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
_set_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
_add_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_set_zero_above_diag<<<Gr,Bl>>>(mat, d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value,
int dim) {
_scale_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A,
MatrixDim dst_d, int src_stride) {
_mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A,
MatrixDim dst_d, int src_stride) {
_div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d,
int src_stride) {
_max<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaD_min(dim3 Gr, dim3 Bl, double* mat, const double* other, MatrixDim mat_d,
int other_stride) {
_min<<<Gr,Bl>>>(mat,other,mat_d,other_stride);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale,
MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale,
MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size) {
_mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size);
}
void cudaD_diff_group_pnorm(dim3 Gr, dim3 Bl, double *id, const double *iv,
const double *ov, const double* od,
MatrixDim id_dim, int iv_stride, int ov_stride,
int od_stride, int group_size, double power) {
_diff_group_pnorm<<<Gr, Bl>>>(id, iv, ov, od, id_dim, iv_stride, ov_stride,
od_stride, group_size, power);
}
void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1,
const double* x2, MatrixDim y_dim,
int x1_stride, int x2_stride, int group_size) {
_calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, y_dim, x1_stride, x2_stride,
group_size);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div,
MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src,
double* dst, MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
_add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
} else {
_add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
}
}
void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src,
int32_cuda num_row_blocks, int32_cuda num_col_blocks,
double* dst, MatrixDim d, int src_stride,
int A_trans) {
if (A_trans) {
_add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks,
dst, d, src_stride);
} else {
_add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst,
d, src_stride);
}
}
void cudaD_add_mat_repeated(dim3 Gr, dim3 Bl, double alpha, const double* src,
MatrixDim src_dim, double *dst, MatrixDim dst_dim) {
_add_mat_repeated<<<Gr,Bl>>>(alpha, src, src_dim, dst, dst_dim);
}
void cudaD_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A,
const double *B, const double *C, double *dst,
MatrixDim d, int stride_a, int stride_b,
int stride_c) {
_set_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d,stride_a,stride_b,stride_c);
}
void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta,
const double* T, MatrixDim tdim, double *S,
MatrixDim sdim) {
_sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col,
double beta, double* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row,
double beta, double* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat,
MatrixDim mat_dim, const double *mat2,
int mat2_row_stride, int mat2_col_stride,
const double *vec, double beta) {
_add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data,
const double *srcA_data,
const double *srcB_data, MatrixDim dim,
int srcA_stride, int srcB_stride, double alpha,
double beta) {
_add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim,
srcA_stride, srcB_stride, alpha, beta);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask,
MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaD_max_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<MAX,double>());
}
void cudaD_min_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<MIN,double>());
}
void cudaD_sum_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<SUM,double>());
}
void cudaD_add_col_sum_mat(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d, const double alpha,
const double beta) {
_transform_reduce_mat_cols<<<Gr, Bl>>>(result, mat, d,
TransReduceOp<SUMAB, double>(alpha, beta));
}
void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig,
double changed) {
_replace_value<<<Gr,Bl>>>(v, dim, orig, changed);
}
void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a,
double param_1, double param_2, double param_3,
int* flag, int dim) {
_set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim);
}
void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a,
int dim) {
_vec_mul_elements<<<Gr,Bl>>>(v, a, dim);
}
void cudaD_vec_min(int Gr, int Bl, const double* v, double* value, int dim,
int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<MIN, double>());
}
void cudaD_vec_max(int Gr, int Bl, const double* v, double* value, int dim,
int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<MAX, double>());
}
void cudaD_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const double* A,
const double* B, MatrixDim dA, int B_stride,
double* value) {
_trace_mat_mat_trans<<<Gr,Bl>>>(A,B,dA,B_stride,value);
}
void cudaD_trace_mat_mat(dim3 Gr, dim3 Bl, const double* A, const double* B,
MatrixDim dA, int B_stride, double* value) {
_trace_mat_mat<32> <<<Gr,Bl>>>(A,B,dA,B_stride,value);
}
void cudaD_add_diag_mat_mat_MNT(int Gr, int Bl, const double alpha,
const double* M, const MatrixDim dim_M,
const double* N, const int stride_N,
const double beta, double* v) {
_add_diag_mat_mat_MNT<<<Gr,Bl>>>(alpha,M,dim_M,N,stride_N,beta,v);
}
void cudaD_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const double alpha,
const double* M, const int stride_M,
const double* N, const MatrixDim dim_N,
const double beta, double* v) {
if (Bl.x == 16) {
    _add_diag_mat_mat_MTN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
  } else if (Bl.x == 32) {
    _add_diag_mat_mat_MTN<32> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaD_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const double alpha,
const double* M, const int stride_M,
const double* N, const MatrixDim dim_N,
const double beta, double* v) {
if (Bl.x == 16) {
    _add_diag_mat_mat_MN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
  } else if (Bl.x == 32) {
    _add_diag_mat_mat_MN<32> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x,
const double* y, double beta, int dim) {
_add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim);
}
void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col,
const double* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col,
const double* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v,value,dim,inc,
TransReduceOp<SUM, double>());
}
void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
double alpha, MatrixElement<double>* x,
int num_elements) {
_cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements);
}
void cudaD_vector_copy_elements(dim3 Gr, dim3 Bl, double *data, int dim,
const double *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
_cuda_vector_copy_elements<<<Gr, Bl>>>(data, dim, src_mat, mat_stride,
transpose, elements);
}
void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim,
double alpha, const Int32Pair* indices,
const double* x, int s, double* data) {
_cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data);
}
void cudaD_matrix_add_to_elements(dim3 Gr, dim3 Bl, double alpha,
double* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
_cuda_matrix_add_to_elements<<<Gr, Bl>>>(alpha, mat, dim, elements);
}
void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst,
const double *src, int dim) {
_vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim);
}
void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val,
float *count, int dim) {
_vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim);
}
void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val,
float *count, int dim) {
_vec_apply_ceiling<<<Gr,Bl>>>(v,ceiling_val,count,dim);
}
void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) {
_vec_apply_exp<<<Gr,Bl>>>(v,dim);
}
void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) {
_vec_apply_log<<<Gr,Bl>>>(v,flag,dim);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d,
const double *Adata, int A_num_rows, int A_num_cols,
int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, double alpha, double beta,
int B_trans) {
if (B_trans) {
_add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
} else {
_add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
}
}
void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data,
int num_blocks, const double *C_data,
int C_num_cols, int C_row_stride, int C_col_stride,
const double *D_data, int D_row_stride,
int D_col_stride, double alpha, double beta) {
_block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride,
alpha, beta);
}
/*
* cu::
*/
void cudaD_soft_hinge(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
_soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size,
double power) {
_group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power);
}
void cudaD_group_spec_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size,
double power) {
if (power == double(0)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L0NORM, double>());
} else if (power == double(1)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L1NORM, double>());
} else if (power == double(2)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L2NORM, double>());
} else if (power == std::numeric_limits<double>::infinity()) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<LINFNORM, double>());
} else {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<LPNORM, double>(power));
}
}
void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride, int group_size) {
_group_transform_reduce<<<Gr,Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<MAX, double>());
}
void cudaD_sigmoid(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
_sigmoid<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_diff_sigmoid(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride,
int y_stride) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaD_tanh(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
_tanh<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_diff_tanh(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride, int y_stride) {
_diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaD_parametric_relu(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride,
const double* a, const double* b) {
_parametric_relu<<<Gr,Bl>>>(y, x, d, src_stride, a, b);
}
void cudaD_diff_parametric_relu(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride,
int y_stride, const double* a, const double* b) {
_diff_parametric_relu<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride, a, b);
}
void cudaD_heaviside(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
_heaviside<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x,
MatrixDim d, int src_stride) {
_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_log_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x,
MatrixDim y_dim, int x_stride) {
_log_softmax_reduce<<<Gr,Bl>>>(y, x, y_dim, x_stride);
}
void cudaD_normalize_per_row(size_t Gr, size_t Bl, double *y, int y_stride,
const double *x, MatrixDim x_d, double target_rms,
bool add_log_stddev) {
_normalize_per_row<<<Gr, Bl>>>(y, y_stride, x, x_d, target_rms, add_log_stddev);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaD_one(int Gr, int Bl, double* x, int dim) {
_one<<<Gr,Bl>>>(x,dim);
}
void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
_take_mean<<<Gr,Bl>>>(x,y,d_in);
}
void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
_take_lower<<<Gr,Bl>>>(x,y,d_in);
}
void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
_take_upper<<<Gr,Bl>>>(x,y,d_in);
}
void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_out) {
_copy_from_sp<<<Gr,Bl>>>(x,y,d_out);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* copy_from, MatrixDim d_out,
MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1,
double lr, MatrixDim d, int stride_grad) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val,
int32_cuda* vec_id, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt,
double* mat_net_out, double* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaD_diff_softmax(dim3 Gr, dim3 Bl, double* x, const MatrixDim dim,
const double* value, const int value_stride,
const double* diff, const int diff_stride) {
_diff_softmax<<<Gr, Bl>>>(x, dim, value, value_stride, diff, diff_stride);
}
void cudaD_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim,
const double* out_value, const int out_value_stride,
const double* out_deriv, const int out_deriv_stride,
double* in_deriv) {
_diff_log_softmax<<<Gr, Bl>>>(in_deriv_dim, out_value, out_value_stride,
out_deriv, out_deriv_stride, in_deriv);
}
void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out,
MatrixDim d_out, const double *v_in) {
_copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in);
}
void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
const double *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
_sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices);
}
void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
const double *src_data, MatrixDim src_dim,
const Int32Pair *indexes) {
_add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes);
}
void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
double *output) {
_matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output);
}
void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1,
const double *mat2, double *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
_equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride,
mask_stride);
}
// Some conversion kernels for which it's more convenient
// to not name them F or D.
void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val) {
_copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const double* smat_val) {
_copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val) {
_copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const double* smat_val) {
_copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const float* smat_val) {
_copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const double* smat_val) {
_copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const float* smat_val) {
_copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const double* smat_val) {
_copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val, float* trace_vec) {
_trace_mat_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val,
float* trace_vec) {
_trace_mat_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val,
double* trace_vec) {
_trace_mat_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val,
double* trace_vec) {
_trace_mat_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_lstm_nonlinearity(dim3 Gr, dim3 Bl, const double* in,
const int in_stride, const double* params,
const int params_stride, const int out_stride,
const int cell_dim, const int have_dropout_mask,
const int num_rows, double* out) {
_lstm_nonlinearity<<<Gr, Bl>>>(
in, in_stride, params, params_stride,
out_stride, cell_dim, have_dropout_mask, num_rows, out);
}
void cudaF_lstm_nonlinearity(dim3 Gr, dim3 Bl, const float* in,
const int in_stride, const float* params,
const int params_stride, const int out_stride,
const int cell_dim, const int have_dropout_mask,
const int num_rows, float* out) {
_lstm_nonlinearity<<<Gr, Bl>>>(
in, in_stride, params, params_stride,
out_stride, cell_dim, have_dropout_mask, num_rows, out);
}
void cudaD_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim,
const int have_dropout_mask,
const int num_rows, const double* input,
const int input_stride, const double* params,
const int params_stride,
const double* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const double* self_repair_config,
double count, double* input_deriv,
const int input_deriv_stride,
double* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
double* self_repair_sum_out,
const int self_repair_sum_out_stride) {
_diff_lstm_nonlinearity<<<Gr, Bl>>>(
cell_dim, have_dropout_mask, num_rows, input,
input_stride, params, params_stride, output_deriv, output_deriv_stride,
deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv,
input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out,
value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride,
self_repair_sum_out, self_repair_sum_out_stride);
}
void cudaF_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim,
const int have_dropout_mask,
const int num_rows, const float* input,
const int input_stride, const float* params,
const int params_stride,
const float* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const float* self_repair_config, double count,
float* input_deriv,
const int input_deriv_stride,
float* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
float* self_repair_sum_out,
const int self_repair_sum_out_stride) {
_diff_lstm_nonlinearity<<<Gr, Bl>>>(
cell_dim, have_dropout_mask, num_rows, input,
input_stride, params, params_stride, output_deriv, output_deriv_stride,
deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv,
input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out,
value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride,
self_repair_sum_out, self_repair_sum_out_stride);
}
void cudaD_copy_cols_from_vec(dim3 Gr, dim3 Bl, double *mat_out,
MatrixDim d_out, const double *v_in) {
_copy_cols_from_vec<<<Gr, Bl>>>(mat_out, d_out, v_in);
}
void cudaF_copy_cols_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out,
const float *v_in) {
_copy_cols_from_vec<<<Gr, Bl>>>(mat_out, d_out, v_in);
}
void cudaF_diff_normalize_per_row(size_t Gr, size_t Bl, float *id,
int id_stride, const float *iv,
MatrixDim iv_dim, const float* od,
int od_stride, float target_rms,
bool add_log_stddev) {
_diff_normalize_per_row<<<Gr, Bl>>>(id, id_stride, iv, iv_dim, od, od_stride,
target_rms, add_log_stddev);
}
void cudaD_diff_normalize_per_row(size_t Gr, size_t Bl, double *id,
int id_stride, const double *iv,
MatrixDim iv_dim, const double* od,
int od_stride, double target_rms,
bool add_log_stddev) {
_diff_normalize_per_row<<<Gr, Bl>>>(id, id_stride, iv, iv_dim, od, od_stride,
target_rms, add_log_stddev);
}
void cudaD_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr,
int* out_col_idx, double* out_val,
const int* row_indexes, const int num_selected_rows,
const int* in_row_ptr, const int* in_col_idx,
const double* in_val) {
_select_rows<<<Gr, Bl>>>(out_row_ptr, out_col_idx, out_val, row_indexes,
num_selected_rows, in_row_ptr, in_col_idx, in_val);
}
void cudaF_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr,
int* out_col_idx, float* out_val, const int* row_indexes,
const int num_selected_rows, const int* in_row_ptr,
const int* in_col_idx, const float* in_val) {
_select_rows<<<Gr, Bl>>>(out_row_ptr, out_col_idx, out_val, row_indexes,
num_selected_rows, in_row_ptr, in_col_idx, in_val);
}
void cudaD_add_smat(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
double alpha, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val) {
_add_smat<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_add_smat(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
float alpha, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val) {
_add_smat<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaD_add_smat_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
double alpha, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val) {
_add_smat_trans<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_add_smat_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
float alpha, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val) {
_add_smat_trans<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaD_apply_exp_special(dim3 Gr, dim3 Bl, double* out, MatrixDim out_dim,
const double* in, int in_stride) {
_apply_exp_special<<<Gr, Bl>>>(out, out_dim, in, in_stride);
}
void cudaF_apply_exp_special(dim3 Gr, dim3 Bl, float* out, MatrixDim out_dim,
const float* in, int in_stride) {
_apply_exp_special<<<Gr, Bl>>>(out, out_dim, in, in_stride);
}
|
a1abe0feae695fcd09d7068a8e822f65af90e5fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
__global__ void Run(int n, float* __restrict left, float* __restrict right, float* __restrict output) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) output[i] = left[i] / right[i];
}
} | a1abe0feae695fcd09d7068a8e822f65af90e5fe.cu | extern "C" {
__global__ void Run(int n, float* __restrict left, float* __restrict right, float* __restrict output) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) output[i] = left[i] / right[i];
}
} |
d018648d24b84a99c9523509fd747a572f7a1532.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <cmath> // for ::ceil()
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
namespace {
template <typename T>
using VectorT = rmm::device_vector<T>;
/**
* @brief Handles the "degenerate" case num_partitions >= num_rows.
*
* Specifically,
* If num_partitions == nrows:
* Then, offsets = [0..nrows-1]
* gather_row_indices = rotate [0..nrows-1] right by start_partition positions;
*
* If num_partitions > nrows:
* Then, let:
* dbg = generate a directed bipartite graph with num_partitions nodes and nrows edges,
* so that node j has an edge to node (j+start_partition) % num_partitions, for j = 0,...,nrows-1;
*
* transpose_dbg = transpose graph of dbg; (i.e., (i -> j) edge in dbg means (j -> i) edge in
* transpose);
*
* (offsets, indices) = (row_offsets, col_indices) of transpose_dbg;
* where (row_offsets, col_indices) are the CSR format of the graph;
*
* @Param[in] input The input table to be round-robin partitioned
* @Param[in] num_partitions Number of partitions for the table
* @Param[in] start_partition Index of the 1st partition
* @Param[in] mr Device memory resource used to allocate the returned table's device memory
* @Param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @Returns A std::pair consisting of a unique_ptr to the partitioned table and the partition
* offsets for each partition within the table
*/
std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> degenerate_partitions(
cudf::table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto nrows = input.num_rows();
// iterator for partition index rotated right by start_partition positions:
//
auto rotated_iter_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0),
[num_partitions, start_partition] __device__(auto index) {
return (index + num_partitions - start_partition) % num_partitions;
});
if (num_partitions == nrows) {
VectorT<cudf::size_type> partition_offsets(num_partitions, cudf::size_type{0});
auto exec = rmm::exec_policy(stream);
thrust::sequence(exec->on(stream), partition_offsets.begin(), partition_offsets.end());
auto uniq_tbl = cudf::detail::gather(input,
rotated_iter_begin,
rotated_iter_begin + nrows, // map
false,
mr,
stream);
auto ret_pair =
std::make_pair(std::move(uniq_tbl), std::vector<cudf::size_type>(num_partitions));
CUDA_TRY(hipMemcpyAsync(ret_pair.second.data(),
partition_offsets.data().get(),
sizeof(cudf::size_type) * num_partitions,
hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
return ret_pair;
} else { //( num_partitions > nrows )
VectorT<cudf::size_type> d_row_indices(nrows, cudf::size_type{0});
// copy rotated right partition indexes that
// fall in the interval [0, nrows):
//(this relies on a _stable_ copy_if())
//
auto exec = rmm::exec_policy(stream);
thrust::copy_if(exec->on(stream),
rotated_iter_begin,
rotated_iter_begin + num_partitions,
d_row_indices.begin(),
[nrows] __device__(auto index) { return (index < nrows); });
//...and then use the result, d_row_indices, as gather map:
//
auto uniq_tbl = cudf::detail::gather(input,
d_row_indices.begin(),
d_row_indices.end(), // map
false,
mr,
stream);
auto ret_pair =
std::make_pair(std::move(uniq_tbl), std::vector<cudf::size_type>(num_partitions));
// offsets (part 1: compute partition sizes);
// iterator for number of edges of the transposed bipartite graph;
// this composes rotated_iter transform (above) iterator with
// calculating number of edges of transposed bi-graph:
//
auto nedges_iter_begin = thrust::make_transform_iterator(
rotated_iter_begin, [nrows] __device__(auto index) { return (index < nrows ? 1 : 0); });
// offsets (part 2: compute partition offsets):
//
VectorT<cudf::size_type> partition_offsets(num_partitions, cudf::size_type{0});
thrust::exclusive_scan(exec->on(stream),
nedges_iter_begin,
nedges_iter_begin + num_partitions,
partition_offsets.begin());
CUDA_TRY(hipMemcpyAsync(ret_pair.second.data(),
partition_offsets.data().get(),
sizeof(cudf::size_type) * num_partitions,
hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
return ret_pair;
}
}
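// Illustrative host-only sketch (an editorial addition, not part of the original cudf source):
// it mirrors the degenerate-case construction documented above using plain std:: containers so
// the bipartite-graph description can be traced by hand; the function name and shape are
// hypothetical. For nrows = 3, num_partitions = 5, start_partition = 2 it produces
// offsets {0, 0, 0, 1, 2} and gather map {0, 1, 2}: partitions 0 and 1 are empty, partition 2
// holds input row 0, partition 3 holds row 1, and partition 4 holds row 2.
inline std::pair<std::vector<cudf::size_type>, std::vector<cudf::size_type>>
degenerate_offsets_sketch(cudf::size_type nrows,
                          cudf::size_type num_partitions,
                          cudf::size_type start_partition)
{
  std::vector<cudf::size_type> offsets(num_partitions, 0);  // exclusive scan of per-partition edge counts
  std::vector<cudf::size_type> gather_map;                  // stable copy_if of in-range rotated indexes
  for (cudf::size_type p = 0; p < num_partitions; ++p) {
    auto rotated = (p + num_partitions - start_partition) % num_partitions;
    if (p > 0) {
      auto prev  = (p - 1 + num_partitions - start_partition) % num_partitions;
      offsets[p] = offsets[p - 1] + (prev < nrows ? 1 : 0);
    }
    if (rotated < nrows) { gather_map.push_back(rotated); }
  }
  return {offsets, gather_map};
}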
} // namespace
namespace cudf {
namespace detail {
std::pair<std::unique_ptr<table>, std::vector<cudf::size_type>> round_robin_partition(
table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto nrows = input.num_rows();
CUDF_EXPECTS(num_partitions > 0, "Incorrect number of partitions. Must be greater than 0.");
CUDF_EXPECTS(start_partition < num_partitions,
"Incorrect start_partition index. Must be less than number of partitions.");
CUDF_EXPECTS(
start_partition >= 0,
"Incorrect start_partition index. Must be positive."); // since cudf::size_type is an alias for
// int32_t, it _can_ be negative
// handle degenerate case:
//
if (num_partitions >= nrows) {
return degenerate_partitions(input, num_partitions, start_partition, mr, stream);
}
auto np_max_size = nrows % num_partitions; //# partitions of max size
// handle case when nr `mod` np == 0;
// fix for bug: https://github.com/rapidsai/cudf/issues/4043
auto num_partitions_max_size = (np_max_size > 0 ? np_max_size : num_partitions);
cudf::size_type max_partition_size = ::ceil(
static_cast<double>(nrows) / static_cast<double>(num_partitions)); // max size of partitions
auto total_max_partitions_size = num_partitions_max_size * max_partition_size;
auto num_partitions_min_size = num_partitions - num_partitions_max_size;
// delta is the number of positions to rotate right
// the original range [0,1,...,n-1]
// and is calculated by accumulating the first
//`start_partition` partition sizes from the end;
// i.e.,
// the partition sizes array (of size p) being:
//[m,m,...,m,(m-1),...,(m-1)]
//(with num_partitions_max_size sizes `m` at the beginning;
// and (p-num_partitions_max_size) sizes `(m-1)` at the end)
// we accumulate the 1st `start_partition` entries from the end:
//
auto delta = (start_partition > num_partitions_min_size
? num_partitions_min_size * (max_partition_size - 1) +
(start_partition - num_partitions_min_size) * max_partition_size
: start_partition * (max_partition_size - 1));
auto iter_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0),
[nrows,
num_partitions,
max_partition_size,
num_partitions_max_size,
total_max_partitions_size,
delta] __device__(auto index0) {
// rotate original index right by delta positions;
// this is the effect of applying start_partition:
//
auto rotated_index = (index0 + nrows - delta) % nrows;
// using rotated_index = given index0, rotated;
// the algorithm below calculates the src round-robin row,
// by calculating the partition_index and the index_within_partition:
//
auto index_within_partition =
(rotated_index <= total_max_partitions_size
? rotated_index % max_partition_size
: (rotated_index - total_max_partitions_size) % (max_partition_size - 1));
auto partition_index =
(rotated_index <= total_max_partitions_size
? rotated_index / max_partition_size
: num_partitions_max_size +
(rotated_index - total_max_partitions_size) / (max_partition_size - 1));
return num_partitions * index_within_partition + partition_index;
});
auto uniq_tbl = cudf::detail::gather(input, iter_begin, iter_begin + nrows, false, mr, stream);
auto ret_pair = std::make_pair(std::move(uniq_tbl), std::vector<cudf::size_type>(num_partitions));
// this has the effect of rotating the set of partition sizes
// right by start_partition positions:
//
auto rotated_iter_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0),
[num_partitions, start_partition, max_partition_size, num_partitions_max_size](auto index) {
return ((index + num_partitions - start_partition) % num_partitions < num_partitions_max_size
? max_partition_size
: max_partition_size - 1);
});
// then exclusive_scan on the resulting
// rotated partition sizes to get the partition offsets
// corresponding to start_partition:
// Since:
//"num_partitions is usually going to be relatively small
//(<1,000), as such, it's probably more expensive to do this on the device.
// Instead, do it on the host directly into the std::vector and avoid the memcpy." - JH
//
thrust::exclusive_scan(
thrust::host, rotated_iter_begin, rotated_iter_begin + num_partitions, ret_pair.second.begin());
return ret_pair;
}
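// Illustrative host-only sketch (an editorial addition, not part of the original cudf source):
// it restates the index arithmetic of the device lambda above so the delta/rotation reasoning can
// be checked by hand; the function name is hypothetical and it assumes the non-degenerate path
// (num_partitions < nrows). For nrows = 10, num_partitions = 4, start_partition = 1 the rotated
// partition sizes are {2, 3, 3, 2}, the offsets {0, 2, 5, 8}, and the gather map
// {3, 7, 0, 4, 8, 1, 5, 9, 2, 6}; i.e. input row r lands in partition
// (r + start_partition) % num_partitions with its original ordering preserved.
inline cudf::size_type round_robin_source_row_sketch(cudf::size_type index0,
                                                     cudf::size_type nrows,
                                                     cudf::size_type num_partitions,
                                                     cudf::size_type start_partition)
{
  auto np_max_size             = nrows % num_partitions;
  auto num_partitions_max_size = (np_max_size > 0 ? np_max_size : num_partitions);
  auto max_partition_size      = (nrows + num_partitions - 1) / num_partitions;  // integer ceil(nrows/np)
  auto total_max_partitions_size = num_partitions_max_size * max_partition_size;
  auto num_partitions_min_size   = num_partitions - num_partitions_max_size;
  auto delta = (start_partition > num_partitions_min_size
                  ? num_partitions_min_size * (max_partition_size - 1) +
                      (start_partition - num_partitions_min_size) * max_partition_size
                  : start_partition * (max_partition_size - 1));
  auto rotated_index = (index0 + nrows - delta) % nrows;
  auto index_within_partition =
    (rotated_index <= total_max_partitions_size
       ? rotated_index % max_partition_size
       : (rotated_index - total_max_partitions_size) % (max_partition_size - 1));
  auto partition_index =
    (rotated_index <= total_max_partitions_size
       ? rotated_index / max_partition_size
       : num_partitions_max_size +
           (rotated_index - total_max_partitions_size) / (max_partition_size - 1));
  return num_partitions * index_within_partition + partition_index;
}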
} // namespace detail
std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> round_robin_partition(
table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource())
{
CUDF_FUNC_RANGE();
return cudf::detail::round_robin_partition(input, num_partitions, start_partition, mr);
}
} // namespace cudf
| d018648d24b84a99c9523509fd747a572f7a1532.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/scan.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <cmath> // for std::ceil()
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
namespace {
template <typename T>
using VectorT = rmm::device_vector<T>;
/**
* @brief Handles the "degenerate" case num_partitions >= num_rows.
*
* Specifically,
* If num_partitions == nrows:
* Then, offsets = [0..nrows-1]
* gather_row_indices = rotate [0..nrows-1] right by start_partition positions;
*
* If num_partitions > nrows:
* Then, let:
* dbg = generate a directed bipartite graph with num_partitions nodes and nrows edges,
* so that node j has an edge to node (j+start_partition) % num_partitions, for j = 0,...,nrows-1;
*
* transpose_dbg = transpose graph of dbg; (i.e., (i -> j) edge in dbg means (j -> i) edge in
* transpose);
*
* (offsets, indices) = (row_offsets, col_indices) of transpose_dbg;
* where (row_offsets, col_indices) are the CSR format of the graph;
*
* @Param[in] input The input table to be round-robin partitioned
* @Param[in] num_partitions Number of partitions for the table
* @Param[in] start_partition Index of the 1st partition
* @Param[in] mr Device memory resource used to allocate the returned table's device memory
* @Param[in] stream CUDA stream used for device memory operations and kernel launches.
*
* @Returns A std::pair consisting of a unique_ptr to the partitioned table and the partition
* offsets for each partition within the table
*/
std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> degenerate_partitions(
cudf::table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto nrows = input.num_rows();
// iterator for partition index rotated right by start_partition positions:
//
auto rotated_iter_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0),
[num_partitions, start_partition] __device__(auto index) {
return (index + num_partitions - start_partition) % num_partitions;
});
if (num_partitions == nrows) {
VectorT<cudf::size_type> partition_offsets(num_partitions, cudf::size_type{0});
auto exec = rmm::exec_policy(stream);
thrust::sequence(exec->on(stream), partition_offsets.begin(), partition_offsets.end());
auto uniq_tbl = cudf::detail::gather(input,
rotated_iter_begin,
rotated_iter_begin + nrows, // map
false,
mr,
stream);
auto ret_pair =
std::make_pair(std::move(uniq_tbl), std::vector<cudf::size_type>(num_partitions));
CUDA_TRY(cudaMemcpyAsync(ret_pair.second.data(),
partition_offsets.data().get(),
sizeof(cudf::size_type) * num_partitions,
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
return ret_pair;
} else { //( num_partitions > nrows )
VectorT<cudf::size_type> d_row_indices(nrows, cudf::size_type{0});
// copy rotated right partition indexes that
// fall in the interval [0, nrows):
//(this relies on a _stable_ copy_if())
//
auto exec = rmm::exec_policy(stream);
thrust::copy_if(exec->on(stream),
rotated_iter_begin,
rotated_iter_begin + num_partitions,
d_row_indices.begin(),
[nrows] __device__(auto index) { return (index < nrows); });
//...and then use the result, d_row_indices, as gather map:
//
auto uniq_tbl = cudf::detail::gather(input,
d_row_indices.begin(),
d_row_indices.end(), // map
false,
mr,
stream);
auto ret_pair =
std::make_pair(std::move(uniq_tbl), std::vector<cudf::size_type>(num_partitions));
// offsets (part 1: compute partition sizes);
// iterator for number of edges of the transposed bipartite graph;
// this composes rotated_iter transform (above) iterator with
// calculating number of edges of transposed bi-graph:
//
auto nedges_iter_begin = thrust::make_transform_iterator(
rotated_iter_begin, [nrows] __device__(auto index) { return (index < nrows ? 1 : 0); });
// offsets (part 2: compute partition offsets):
//
VectorT<cudf::size_type> partition_offsets(num_partitions, cudf::size_type{0});
thrust::exclusive_scan(exec->on(stream),
nedges_iter_begin,
nedges_iter_begin + num_partitions,
partition_offsets.begin());
CUDA_TRY(cudaMemcpyAsync(ret_pair.second.data(),
partition_offsets.data().get(),
sizeof(cudf::size_type) * num_partitions,
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
return ret_pair;
}
}
} // namespace
namespace cudf {
namespace detail {
std::pair<std::unique_ptr<table>, std::vector<cudf::size_type>> round_robin_partition(
table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto nrows = input.num_rows();
CUDF_EXPECTS(num_partitions > 0, "Incorrect number of partitions. Must be greater than 0.");
CUDF_EXPECTS(start_partition < num_partitions,
"Incorrect start_partition index. Must be less than number of partitions.");
CUDF_EXPECTS(
start_partition >= 0,
"Incorrect start_partition index. Must be positive."); // since cudf::size_type is an alias for
// int32_t, it _can_ be negative
// handle degenerate case:
//
if (num_partitions >= nrows) {
return degenerate_partitions(input, num_partitions, start_partition, mr, stream);
}
auto np_max_size = nrows % num_partitions; //# partitions of max size
// handle case when nr `mod` np == 0;
// fix for bug: https://github.com/rapidsai/cudf/issues/4043
auto num_partitions_max_size = (np_max_size > 0 ? np_max_size : num_partitions);
cudf::size_type max_partition_size = std::ceil(
static_cast<double>(nrows) / static_cast<double>(num_partitions)); // max size of partitions
auto total_max_partitions_size = num_partitions_max_size * max_partition_size;
auto num_partitions_min_size = num_partitions - num_partitions_max_size;
// delta is the number of positions to rotate right
// the original range [0,1,...,n-1]
// and is calculated by accumulating the first
//`start_partition` partition sizes from the end;
// i.e.,
// the partition sizes array (of size p) being:
//[m,m,...,m,(m-1),...,(m-1)]
//(with num_partitions_max_size sizes `m` at the beginning;
// and (p-num_partitions_max_size) sizes `(m-1)` at the end)
// we accumulate the 1st `start_partition` entries from the end:
//
auto delta = (start_partition > num_partitions_min_size
? num_partitions_min_size * (max_partition_size - 1) +
(start_partition - num_partitions_min_size) * max_partition_size
: start_partition * (max_partition_size - 1));
auto iter_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0),
[nrows,
num_partitions,
max_partition_size,
num_partitions_max_size,
total_max_partitions_size,
delta] __device__(auto index0) {
// rotate original index right by delta positions;
// this is the effect of applying start_partition:
//
auto rotated_index = (index0 + nrows - delta) % nrows;
// using rotated_index = given index0, rotated;
// the algorithm below calculates the src round-robin row,
// by calculating the partition_index and the index_within_partition:
//
auto index_within_partition =
(rotated_index <= total_max_partitions_size
? rotated_index % max_partition_size
: (rotated_index - total_max_partitions_size) % (max_partition_size - 1));
auto partition_index =
(rotated_index <= total_max_partitions_size
? rotated_index / max_partition_size
: num_partitions_max_size +
(rotated_index - total_max_partitions_size) / (max_partition_size - 1));
return num_partitions * index_within_partition + partition_index;
});
auto uniq_tbl = cudf::detail::gather(input, iter_begin, iter_begin + nrows, false, mr, stream);
auto ret_pair = std::make_pair(std::move(uniq_tbl), std::vector<cudf::size_type>(num_partitions));
// this has the effect of rotating the set of partition sizes
// right by start_partition positions:
//
auto rotated_iter_begin = thrust::make_transform_iterator(
thrust::make_counting_iterator<cudf::size_type>(0),
[num_partitions, start_partition, max_partition_size, num_partitions_max_size](auto index) {
return ((index + num_partitions - start_partition) % num_partitions < num_partitions_max_size
? max_partition_size
: max_partition_size - 1);
});
// then exclusive_scan on the resulting
// rotated partition sizes to get the partition offsets
// corresponding to start_partition:
// Since:
//"num_partitions is usually going to be relatively small
//(<1,000), as such, it's probably more expensive to do this on the device.
// Instead, do it on the host directly into the std::vector and avoid the memcpy." - JH
//
thrust::exclusive_scan(
thrust::host, rotated_iter_begin, rotated_iter_begin + num_partitions, ret_pair.second.begin());
return ret_pair;
}
} // namespace detail
std::pair<std::unique_ptr<cudf::table>, std::vector<cudf::size_type>> round_robin_partition(
table_view const& input,
cudf::size_type num_partitions,
cudf::size_type start_partition = 0,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource())
{
CUDF_FUNC_RANGE();
return cudf::detail::round_robin_partition(input, num_partitions, start_partition, mr);
}
} // namespace cudf
|
0f4114a6b16ed053960bdf03b48e5d8f3aa96d1c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "LeftRightBound2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Hs = NULL;
hipMalloc(&Hs, XSIZE*YSIZE);
double *Ztopo = NULL;
hipMalloc(&Ztopo, XSIZE*YSIZE);
double *K2e = NULL;
hipMalloc(&K2e, XSIZE*YSIZE);
double *K2w = NULL;
hipMalloc(&K2w, XSIZE*YSIZE);
int BC2D = 1;
int M = 2;
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((LeftRightBound2D), dim3(gridBlock), dim3(threadBlock), 0, 0, Hs, Ztopo, K2e, K2w, BC2D, M, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((LeftRightBound2D), dim3(gridBlock), dim3(threadBlock), 0, 0, Hs, Ztopo, K2e, K2w, BC2D, M, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((LeftRightBound2D), dim3(gridBlock), dim3(threadBlock), 0, 0, Hs, Ztopo, K2e, K2w, BC2D, M, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0f4114a6b16ed053960bdf03b48e5d8f3aa96d1c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "LeftRightBound2D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *Hs = NULL;
cudaMalloc(&Hs, XSIZE*YSIZE);
double *Ztopo = NULL;
cudaMalloc(&Ztopo, XSIZE*YSIZE);
double *K2e = NULL;
cudaMalloc(&K2e, XSIZE*YSIZE);
double *K2w = NULL;
cudaMalloc(&K2w, XSIZE*YSIZE);
int BC2D = 1;
int M = 2;
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
LeftRightBound2D<<<gridBlock,threadBlock>>>(Hs,Ztopo,K2e,K2w,BC2D,M,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
LeftRightBound2D<<<gridBlock,threadBlock>>>(Hs,Ztopo,K2e,K2w,BC2D,M,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
LeftRightBound2D<<<gridBlock,threadBlock>>>(Hs,Ztopo,K2e,K2w,BC2D,M,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |