hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
d0567f4d4ee0994d8f42ef525f45ada36412ba26.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple matrix sum on the GPU and on the host.
* sumMatrixOnGPUMix splits the work of the matrix sum across GPU threads. A 2D
* grid of 1D thread blocks is used. sumMatrixOnHost sequentially iterates
* through the matrix elements on the host.
*/
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (fabs(hostRef[i] - gpuRef[i]) > epsilon) // fabs: the integer abs() would truncate the float difference
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 2D block 1D
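// Each block covers blockDim.x consecutive elements of a single row: blockIdx.y
// selects the row (iy) and blockIdx.x/threadIdx.x select the column (ix), so the
// linear offset idx = iy * nx + ix gives coalesced accesses for adjacent threads.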
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y;
unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny)
MatC[idx] = MatA[idx] + MatB[idx];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
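// With nx = ny = 2^14 each matrix holds 2^28 floats (1 GiB), so the four host
// buffers take 4 GiB and the three device buffers take 3 GiB.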
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
double iStart = seconds();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = seconds() - iStart;
printf("Matrix initialization elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = seconds();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = seconds() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(hipMalloc((void **)&d_MatA, nBytes));
CHECK(hipMalloc((void **)&d_MatB, nBytes));
CHECK(hipMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 32;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, ny);
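// For nx = ny = 16384 and 32-thread blocks this yields a (512, 16384) grid:
// 8,388,608 blocks, one 32-thread block per 32-element row segment.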
iStart = seconds();
hipLaunchKernelGGL(( sumMatrixOnGPUMix), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(hipGetLastError());
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(hipFree(d_MatA));
CHECK(hipFree(d_MatB));
CHECK(hipFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return (0);
}
| d0567f4d4ee0994d8f42ef525f45ada36412ba26.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple matrix sum on the GPU and on the host.
* sumMatrixOnGPUMix splits the work of the matrix sum across GPU threads. A 2D
* grid of 1D thread blocks is used. sumMatrixOnHost sequentially iterates
* through the matrix elements on the host.
*/
void initialData(float *ip, const int size)
{
int i;
for(i = 0; i < size; i++)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void sumMatrixOnHost(float *A, float *B, float *C, const int nx,
const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
for (int iy = 0; iy < ny; iy++)
{
for (int ix = 0; ix < nx; ix++)
{
ic[ix] = ia[ix] + ib[ix];
}
ia += nx;
ib += nx;
ic += nx;
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (fabs(hostRef[i] - gpuRef[i]) > epsilon) // fabs: the integer abs() would truncate the float difference
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
// grid 2D block 1D
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx,
int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y;
unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny)
MatC[idx] = MatA[idx] + MatB[idx];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of matrix
int nx = 1 << 14;
int ny = 1 << 14;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
// malloc host memory
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
double iStart = seconds();
initialData(h_A, nxy);
initialData(h_B, nxy);
double iElaps = seconds() - iStart;
printf("Matrix initialization elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add matrix at host side for result checks
iStart = seconds();
sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
iElaps = seconds() - iStart;
printf("sumMatrixOnHost elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
CHECK(cudaMalloc((void **)&d_MatA, nBytes));
CHECK(cudaMalloc((void **)&d_MatB, nBytes));
CHECK(cudaMalloc((void **)&d_MatC, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int dimx = 32;
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, ny);
iStart = seconds();
sumMatrixOnGPUMix<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x,
grid.y,
block.x, block.y, iElaps);
// check kernel error
CHECK(cudaGetLastError());
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nxy);
// free device global memory
CHECK(cudaFree(d_MatA));
CHECK(cudaFree(d_MatB));
CHECK(cudaFree(d_MatC));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return (0);
}
|
beb3277aeae74f18478f9801cdd485bd43a059de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include "bmp/EasyBMP.h"
using namespace std;
#define WINDOW_SIZE 3
#define WINDOW_LENGHT WINDOW_SIZE * WINDOW_SIZE
texture<float, hipTextureType2D, hipReadModeElementType> tex;
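// Removes salt-and-pepper noise with a 3x3 median filter: each thread gathers its
// 9 neighbours from the texture, sorts them with a simple exchange sort, and
// writes the middle value (index 4) as the output pixel.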
__global__ void saltAndPepperWithCuda(float *output, int imageWidth, int imageHeight) {
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if (!(row < imageHeight && col < imageWidth)) {
return;
}
float filter[WINDOW_LENGHT] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
for (int x = 0; x < WINDOW_SIZE; x++) {
for (int y = 0; y < WINDOW_SIZE; y++) {
filter[x * WINDOW_SIZE + y] = tex2D(tex, col + y - 1, row + x - 1);
}
}
for (int i = 0; i < WINDOW_LENGHT; i++) {
for (int j = i + 1; j < WINDOW_LENGHT; j++) {
if (filter[i] > filter[j]) {
float tmp = filter[i];
filter[i] = filter[j];
filter[j] = tmp;
}
}
}
output[row * imageWidth + col] = filter[(int)(WINDOW_LENGHT / 2)];
}
float *readLikeGrayScale(char *filePathInput, unsigned int *rows, unsigned int *cols) {
BMP Input;
Input.ReadFromFile(filePathInput);
*rows = Input.TellHeight();
*cols = Input.TellWidth();
float *grayscale = (float *)calloc(*rows * *cols, sizeof(float));
for (int j = 0; j < *rows; j++) {
for (int i = 0; i < *cols; i++) {
float gray = (float)floor(0.299 * Input(i, j)->Red +
0.587 * Input(i, j)->Green +
0.114 * Input(i, j)->Blue);
grayscale[j * *cols + i] = gray;
}
}
return grayscale;
}
void writeImage(char *filePath, float *grayscale, unsigned int rows, unsigned int cols) {
BMP Output;
Output.SetSize(cols, rows);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
RGBApixel pixel;
pixel.Red = grayscale[i * cols + j];
pixel.Green = grayscale[i * cols + j];
pixel.Blue = grayscale[i * cols + j];
pixel.Alpha = 0;
Output.SetPixel(j, i, pixel);
}
}
Output.WriteToFile(filePath);
}
int main() {
clock_t start, stop;
float *grayscale = 0;
unsigned int rows, cols;
grayscale = readLikeGrayScale("input.bmp", &rows, &cols);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *cuArray;
start = clock();
hipMallocArray(&cuArray, &channelDesc, cols, rows);
hipMemcpyToArray(cuArray, 0, 0, grayscale, rows * cols * sizeof(float), hipMemcpyHostToDevice);
tex.addressMode[0] = hipAddressModeWrap;
tex.addressMode[1] = hipAddressModeWrap;
tex.filterMode = hipFilterModeLinear;
hipBindTextureToArray(tex, cuArray, channelDesc);
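// Note: hipFilterModeLinear interpolates between texel centres, so integer
// coordinates read a blend of neighbouring pixels; hipFilterModePoint would
// return exact pixel values, which is what a median filter normally expects.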
float *dev_output, *output;
output = (float *)calloc(rows * cols, sizeof(float));
hipMalloc(&dev_output, rows * cols * sizeof(float));
dim3 dimBlock(16, 16);
dim3 dimGrid(
(cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y
);
hipLaunchKernelGGL(( saltAndPepperWithCuda), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_output, cols, rows);
hipMemcpy(output, dev_output, rows * cols * sizeof(float), hipMemcpyDeviceToHost);
stop = clock();
printf("GPU time: %f s.\n", (stop - start) / (float)CLOCKS_PER_SEC);
writeImage("result.bmp", output, rows, cols);
hipFreeArray(cuArray);
hipFree(dev_output);
return 0;
} | beb3277aeae74f18478f9801cdd485bd43a059de.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include "bmp/EasyBMP.h"
using namespace std;
#define WINDOW_SIZE 3
#define WINDOW_LENGHT WINDOW_SIZE * WINDOW_SIZE
texture<float, cudaTextureType2D, cudaReadModeElementType> tex;
__global__ void saltAndPepperWithCuda(float *output, int imageWidth, int imageHeight) {
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if (!(row < imageHeight && col < imageWidth)) {
return;
}
float filter[WINDOW_LENGHT] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
for (int x = 0; x < WINDOW_SIZE; x++) {
for (int y = 0; y < WINDOW_SIZE; y++) {
filter[x * WINDOW_SIZE + y] = tex2D(tex, col + y - 1, row + x - 1);
}
}
for (int i = 0; i < WINDOW_LENGHT; i++) {
for (int j = i + 1; j < WINDOW_LENGHT; j++) {
if (filter[i] > filter[j]) {
float tmp = filter[i];
filter[i] = filter[j];
filter[j] = tmp;
}
}
}
output[row * imageWidth + col] = filter[(int)(WINDOW_LENGHT / 2)];
}
float *readLikeGrayScale(char *filePathInput, unsigned int *rows, unsigned int *cols) {
BMP Input;
Input.ReadFromFile(filePathInput);
*rows = Input.TellHeight();
*cols = Input.TellWidth();
float *grayscale = (float *)calloc(*rows * *cols, sizeof(float));
for (int j = 0; j < *rows; j++) {
for (int i = 0; i < *cols; i++) {
float gray = (float)floor(0.299 * Input(i, j)->Red +
0.587 * Input(i, j)->Green +
0.114 * Input(i, j)->Blue);
grayscale[j * *cols + i] = gray;
}
}
return grayscale;
}
void writeImage(char *filePath, float *grayscale, unsigned int rows, unsigned int cols) {
BMP Output;
Output.SetSize(cols, rows);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
RGBApixel pixel;
pixel.Red = grayscale[i * cols + j];
pixel.Green = grayscale[i * cols + j];
pixel.Blue = grayscale[i * cols + j];
pixel.Alpha = 0;
Output.SetPixel(j, i, pixel);
}
}
Output.WriteToFile(filePath);
}
int main() {
clock_t start, stop;
float *grayscale = 0;
unsigned int rows, cols;
grayscale = readLikeGrayScale("input.bmp", &rows, &cols);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *cuArray;
start = clock();
cudaMallocArray(&cuArray, &channelDesc, cols, rows);
cudaMemcpyToArray(cuArray, 0, 0, grayscale, rows * cols * sizeof(float), cudaMemcpyHostToDevice);
tex.addressMode[0] = cudaAddressModeWrap;
tex.addressMode[1] = cudaAddressModeWrap;
tex.filterMode = cudaFilterModeLinear;
cudaBindTextureToArray(tex, cuArray, channelDesc);
float *dev_output, *output;
output = (float *)calloc(rows * cols, sizeof(float));
cudaMalloc(&dev_output, rows * cols * sizeof(float));
dim3 dimBlock(16, 16);
dim3 dimGrid(
(cols + dimBlock.x - 1) / dimBlock.x,
(rows + dimBlock.y - 1) / dimBlock.y
);
saltAndPepperWithCuda<<<dimGrid, dimBlock>>>(dev_output, cols, rows);
cudaMemcpy(output, dev_output, rows * cols * sizeof(float), cudaMemcpyDeviceToHost);
stop = clock();
printf("GPU time: %f s.\n", (stop - start) / (float)CLOCKS_PER_SEC);
writeImage("result.bmp", output, rows, cols);
cudaFreeArray(cuArray);
cudaFree(dev_output);
return 0;
} |
927991f17a2a5fa838314fb455c1fc5d30b5696d.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdlib>
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <omp.h>
#include <string>
#define NUMOFPTCLS 8192 // should be a multiple of the block size on device
#define BLCSIZE 256 //block size on device
#define SIMTIME 1000.0f
#define MODELINGTIME 1000.0f
#define STEP 0.2f
#define MODEL_PARAM 0.00165344f // MODEL_PARAM = a_0/R * 0.75
#define MINDIST 0.000001f // small offset added to |r_i - r_j| to avoid division by zero
__global__ void onCernelCalc(float *X, float *Y, float *Z,
float *UX, float *UY, float *UZ,
int blcs_num, int ptclsNumm, float modelParam)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
float ux_i = 0.0f; float uy_i = 0.0f; float uz_i = 0.0f;
float Xi = X[id]; float Yi = Y[id]; float Zi = Z[id]; //current element coord-s
float hx, hy, hz; // (r_i - r_j)
float r; // |r_i - r_j|
const int SMBS = 256; //shared memory block size
__shared__ float Xs[SMBS]; __shared__ float Ys[SMBS]; __shared__ float Zs[SMBS]; //copy of state in shared mem
for (int block = 0; block < blcs_num; ++block)
{
Xs[threadIdx.x] = X[threadIdx.x + block * blockDim.x]; //parallel copy
Ys[threadIdx.x] = Y[threadIdx.x + block * blockDim.x];
Zs[threadIdx.x] = Z[threadIdx.x + block * blockDim.x];
__syncthreads();
for (int j = 0; j < blockDim.x; ++j)
{
if ((j + block * blockDim.x) != id)
{
hx = Xi - Xs[j];
hy = Yi - Ys[j];
hz = Zi - Zs[j];
r = sqrtf(hx*hx + hy*hy + hz*hz) + MINDIST;
ux_i += -hx * (hz / (r*r*r));
uy_i += -hy * (hz / (r*r*r));
uz_i += -1.0f / (r) - hz * (hz / (r*r*r));
}
}
__syncthreads();
}
UX[id] = modelParam * ux_i;
UY[id] = modelParam * uy_i;
UZ[id] = modelParam * uz_i - 1.0f;
}
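// The kernel above uses the classic shared-memory tiling pattern: each block
// stages 256 particle positions per iteration, so every position is read from
// global memory once per block rather than once per thread.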
void viCalc(float *X, float *Y, float *Z, float *VX, float *VY, float *VZ,
float *devX, float *devY, float *devZ, float *devVX, float *devVY, float *devVZ,
int ptclsNumm, float modelParam)
{
unsigned int array_size = sizeof(float) * ptclsNumm;
//copy state from host to device
hipMemcpy(devX, X, array_size, hipMemcpyHostToDevice);
hipMemcpy(devY, Y, array_size, hipMemcpyHostToDevice);
hipMemcpy(devZ, Z, array_size, hipMemcpyHostToDevice);
hipMemcpy(devVX, VX, array_size, hipMemcpyHostToDevice);
hipMemcpy(devVY, VY, array_size, hipMemcpyHostToDevice);
hipMemcpy(devVZ, VZ, array_size, hipMemcpyHostToDevice);
int numOfThreads = BLCSIZE;
int numOfBlocks = ptclsNumm / BLCSIZE;
hipLaunchKernelGGL(( onCernelCalc) , dim3(numOfBlocks), dim3(numOfThreads), 0, 0, devX, devY, devZ, devVX, devVY, devVZ, numOfBlocks, ptclsNumm, modelParam);
hipMemcpy(VX, devVX, array_size, hipMemcpyDeviceToHost);
hipMemcpy(VY, devVY, array_size, hipMemcpyDeviceToHost);
hipMemcpy(VZ, devVZ, array_size, hipMemcpyDeviceToHost);
}
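// init() seeds the particles inside a torus of major radius R1 = 4 and minor
// radius up to R2 = 2, using the parametrization
//   (x, y, z) = ((R1 + r*cos(theta))*cos(fi), (R1 + r*cos(theta))*sin(fi), r*sin(theta));
// init_s() below fills a ball of radius R1 the same way, using spherical angles.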
void init(float *X, float *Y, float *Z, float *VX, float *VY, float *VZ, int num)
{
for (int i = 0; i < num; ++i)
{
VX[i] = 0;
VY[i] = 0;
VZ[i] = 0;
}
float const R1 = 4.0f;
float const R2 = 2.0f;
int xyRadialNum = num / 128;
int zRadialNum = num / 256;
int rRadialNum = (num / xyRadialNum ) / zRadialNum;
float fi, theta, r_param;
int counter = 0;
for (int xy = 0; xy < xyRadialNum; ++xy)
{
for (int z = 0; z < zRadialNum; ++z)
{
for (int r = 0; ((r < rRadialNum) && (counter < num)); ++r, ++counter)
{
fi = ((rand() * 1.0) / (RAND_MAX * 1.0)) * 2 * 3.1415926535;
theta = ((rand() * 1.0) / (RAND_MAX * 1.0)) * 2 * 3.1415926535;
r_param = ((rand() * 1.0) / (RAND_MAX * 1.0)) * R2;
X[counter] = R1 * cos(fi) + r_param * cos(theta) * cos(fi);
Y[counter] = R1 * sin(fi) + r_param * cos(theta) * sin(fi);
Z[counter] = r_param * sin(theta);
}
}
}
}
void init_s(float *X, float *Y, float *Z, float *VX, float *VY, float *VZ, int num)
{
for (int i = 0; i < num; ++i)
{
VX[i] = 0;
VY[i] = 0;
VZ[i] = 0;
}
float const R1 = 4.0f;
int xyRadialNum = num / 128;
int zRadialNum = num / 256;
int rRadialNum = (num / xyRadialNum) / zRadialNum;
float fi, theta, r_param;
int counter = 0;
for (int xy = 0; xy < xyRadialNum; ++xy)
{
for (int z = 0; z < zRadialNum; ++z)
{
for (int r = 0; ((r < rRadialNum) && (counter < num)); ++r, ++counter)
{
fi = ((rand() * 1.0) / (RAND_MAX * 1.0)) * 2 * 3.1415926535;
theta = ((rand() * 1.0) / (RAND_MAX * 1.0)) * 2 * 3.1415926535;
r_param = ((rand() * 1.0) / (RAND_MAX * 1.0)) * R1;
X[counter] = r_param * cos(fi) * cos(theta);
Y[counter] = r_param * sin(fi) * cos(theta);
Z[counter] = r_param * sin(theta);
}
}
}
}
void observe(float *X, float *Y, float *Z, int num)
{
for (int i = 0; i < num; ++i)
{
std::cout << X[i] << '\t' << Y[i] << '\t' << Z[i] << std::endl;
}
std::cout << '#' << std::endl;
}
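// Classical 4th-order Runge-Kutta step: with velocities k1..k4 evaluated at the
// staged positions below, every coordinate is advanced by
//   x += (k1 + 2*k2 + 2*k3 + k4) * STEP / 6,
// which is exactly what the final update loop computes.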
void RK4_step(float ** X, float ** Y, float ** Z, float * VX, float * VY, float * VZ,
float *devX, float * devY, float * devZ, float * devVX, float * devVY, float * devVZ,
float ** KX, float ** KY, float ** KZ, int ptclsNumm, float modelParam)
{
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i) {
KX[0][i] = 0.0f;
KY[0][i] = 0.0f;
KZ[0][i] = 0.0f;
X[1][i] = X[0][i];
Y[1][i] = Y[0][i];
Z[1][i] = Z[0][i];
}
viCalc(X[1], Y[1], Z[1], KX[0], KY[0], KZ[0], devX, devY, devZ, devVX, devVY, devVZ, ptclsNumm, modelParam);
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i) {
KX[1][i] = 0.0f;
KY[1][i] = 0.0f;
KZ[1][i] = 0.0f;
X[1][i] = X[0][i] + KX[0][i] * STEP * 0.5f;
Y[1][i] = Y[0][i] + KY[0][i] * STEP * 0.5f;
Z[1][i] = Z[0][i] + KZ[0][i] * STEP * 0.5f;
}
viCalc(X[1], Y[1], Z[1], KX[1], KY[1], KZ[1], devX, devY, devZ, devVX, devVY, devVZ, ptclsNumm, modelParam);
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i) {
KX[2][i] = 0.0f;
KY[2][i] = 0.0f;
KZ[2][i] = 0.0f;
X[1][i] = X[0][i] + KX[1][i] * STEP * 0.5f;
Y[1][i] = Y[0][i] + KY[1][i] * STEP * 0.5f;
Z[1][i] = Z[0][i] + KZ[1][i] * STEP * 0.5f;
}
viCalc(X[1], Y[1], Z[1], KX[2], KY[2], KZ[2], devX, devY, devZ, devVX, devVY, devVZ, ptclsNumm, modelParam);
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i) {
KX[3][i] = 0.0f;
KY[3][i] = 0.0f;
KZ[3][i] = 0.0f;
X[1][i] = X[0][i] + KX[2][i] * STEP;
Y[1][i] = Y[0][i] + KY[2][i] * STEP;
Z[1][i] = Z[0][i] + KZ[2][i] * STEP;
}
viCalc(X[1], Y[1], Z[1], KX[3], KY[3], KZ[3], devX, devY, devZ, devVX, devVY, devVZ, ptclsNumm, modelParam);
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i)
{
X[0][i] += 1.0f / 6.0f*(KX[0][i] + 2 * KX[1][i] + 2 * KX[2][i] + KX[3][i]) * STEP;
Y[0][i] += 1.0f / 6.0f*(KY[0][i] + 2 * KY[1][i] + 2 * KY[2][i] + KY[3][i]) * STEP;
Z[0][i] += 1.0f / 6.0f*(KZ[0][i] + 2 * KZ[1][i] + 2 * KZ[2][i] + KZ[3][i]) * STEP;
}
}
int main()
{
float ** KX = new float*[4];
float ** KY = new float*[4];
float ** KZ = new float*[4];
float ** X = new float*[2];
float ** Y = new float*[2];
float ** Z = new float*[2];
for (int param = 32; param < 33; param *= 2)
{
int ptclsNum = 256 * param;
float modelparam = MODEL_PARAM;
//alloc arrays on host
for (int gh = 0; gh < 4; ++gh)
{
KX[gh] = new float[ptclsNum];
KY[gh] = new float[ptclsNum];
KZ[gh] = new float[ptclsNum];
}
for (int gh = 0; gh < 2; ++gh)
{
X[gh] = new float[ptclsNum];
Y[gh] = new float[ptclsNum];
Z[gh] = new float[ptclsNum];
}
float * VX = new float[ptclsNum];
float * VY = new float[ptclsNum];
float * VZ = new float[ptclsNum];
//alloc arrays on device
float * devX, *devY, *devZ, *devVX, *devVY, *devVZ;
unsigned int array_size = sizeof(float) * ptclsNum;
hipMalloc((void**)&devX, array_size); hipMalloc((void**)&devY, array_size); hipMalloc((void**)&devZ, array_size);
hipMalloc((void**)&devVX, array_size); hipMalloc((void**)&devVY, array_size); hipMalloc((void**)&devVZ, array_size);
std::string path = std::to_string(param);
std::freopen((path + "_out_torus.txt").c_str(), "w", stdout);
//init conditions for host
init(X[0], Y[0], Z[0], VX, VY, VZ, ptclsNum);
for (double t = 0.0f; t < MODELINGTIME; t += STEP)
{
RK4_step(X, Y, Z, VX, VY, VZ, devX, devY, devZ, devVX, devVY, devVZ, KX, KY, KZ, ptclsNum, modelparam);
if ((int(t * 1000) % 100) == 0) observe(X[0], Y[0], Z[0], ptclsNum);
std::cerr << t << " of " << MODELINGTIME << std::endl;
}
std::freopen((path + "_out_sphe.txt").c_str(), "w", stdout);
//init conditions for host
init_s(X[0], Y[0], Z[0], VX, VY, VZ, ptclsNum);
for (double t = 0.0f; t < MODELINGTIME; t += STEP)
{
RK4_step(X, Y, Z, VX, VY, VZ, devX, devY, devZ, devVX, devVY, devVZ, KX, KY, KZ, ptclsNum, modelparam);
if ((int(t * 1000) % 100) == 0) observe(X[0], Y[0], Z[0], ptclsNum);
std::cerr << t << " of " << MODELINGTIME << std::endl;
}
for (int gh = 0; gh < 4; ++gh)
{
delete [] KX[gh];
delete [] KY[gh];
delete [] KZ[gh];
}
for (int gh = 0; gh < 2; ++gh)
{
delete[] X[gh];
delete[] Y[gh];
delete[] Z[gh];
}
delete[] VX;
delete[] VY;
delete[] VZ;
hipFree(devX);
hipFree(devY);
hipFree(devZ);
hipFree(devVX);
hipFree(devVY);
hipFree(devVZ);
}
return 0;
}
| 927991f17a2a5fa838314fb455c1fc5d30b5696d.cu | #include <cmath>
#include <cstdlib>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <omp.h>
#include <string>
#define NUMOFPTCLS 8192 // should be a multiple of the block size on device
#define BLCSIZE 256 //block size on device
#define SIMTIME 1000.0f
#define MODELINGTIME 1000.0f
#define STEP 0.2f
#define MODEL_PARAM 0.00165344f // MODEL_PARAM = a_0/R * 0.75
#define MINDIST 0.000001f // small offset added to |r_i - r_j| to avoid division by zero
__global__ void onCernelCalc(float *X, float *Y, float *Z,
float *UX, float *UY, float *UZ,
int blcs_num, int ptclsNumm, float modelParam)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
float ux_i = 0.0f; float uy_i = 0.0f; float uz_i = 0.0f;
float Xi = X[id]; float Yi = Y[id]; float Zi = Z[id]; //current element coord-s
float hx, hy, hz; // (r_i - r_j)
float r; // |r_i - r_j|
const int SMBS = 256; //shared memory block size
__shared__ float Xs[SMBS]; __shared__ float Ys[SMBS]; __shared__ float Zs[SMBS]; //copy of state in shared mem
for (int block = 0; block < blcs_num; ++block)
{
Xs[threadIdx.x] = X[threadIdx.x + block * blockDim.x]; //parallel copy
Ys[threadIdx.x] = Y[threadIdx.x + block * blockDim.x];
Zs[threadIdx.x] = Z[threadIdx.x + block * blockDim.x];
__syncthreads();
for (int j = 0; j < blockDim.x; ++j)
{
if ((j + block * blockDim.x) != id)
{
hx = Xi - Xs[j];
hy = Yi - Ys[j];
hz = Zi - Zs[j];
r = sqrtf(hx*hx + hy*hy + hz*hz) + MINDIST;
ux_i += -hx * (hz / (r*r*r));
uy_i += -hy * (hz / (r*r*r));
uz_i += -1.0f / (r) - hz * (hz / (r*r*r));
}
}
__syncthreads();
}
UX[id] = modelParam * ux_i;
UY[id] = modelParam * uy_i;
UZ[id] = modelParam * uz_i - 1.0f;
}
void viCalc(float *X, float *Y, float *Z, float *VX, float *VY, float *VZ,
float *devX, float *devY, float *devZ, float *devVX, float *devVY, float *devVZ,
int ptclsNumm, float modelParam)
{
unsigned int array_size = sizeof(float) * ptclsNumm;
//copy state from host to device
cudaMemcpy(devX, X, array_size, cudaMemcpyHostToDevice);
cudaMemcpy(devY, Y, array_size, cudaMemcpyHostToDevice);
cudaMemcpy(devZ, Z, array_size, cudaMemcpyHostToDevice);
cudaMemcpy(devVX, VX, array_size, cudaMemcpyHostToDevice);
cudaMemcpy(devVY, VY, array_size, cudaMemcpyHostToDevice);
cudaMemcpy(devVZ, VZ, array_size, cudaMemcpyHostToDevice);
int numOfThreads = BLCSIZE;
int numOfBlocks = ptclsNumm / BLCSIZE;
onCernelCalc <<<numOfBlocks, numOfThreads>>> (devX, devY, devZ, devVX, devVY, devVZ, numOfBlocks, ptclsNumm, modelParam);
cudaMemcpy(VX, devVX, array_size, cudaMemcpyDeviceToHost);
cudaMemcpy(VY, devVY, array_size, cudaMemcpyDeviceToHost);
cudaMemcpy(VZ, devVZ, array_size, cudaMemcpyDeviceToHost);
}
void init(float *X, float *Y, float *Z, float *VX, float *VY, float *VZ, int num)
{
for (int i = 0; i < num; ++i)
{
VX[i] = 0;
VY[i] = 0;
VZ[i] = 0;
}
float const R1 = 4.0f;
float const R2 = 2.0f;
int xyRadialNum = num / 128;
int zRadialNum = num / 256;
int rRadialNum = (num / xyRadialNum ) / zRadialNum;
float fi, theta, r_param;
int counter = 0;
for (int xy = 0; xy < xyRadialNum; ++xy)
{
for (int z = 0; z < zRadialNum; ++z)
{
for (int r = 0; ((r < rRadialNum) && (counter < num)); ++r, ++counter)
{
fi = ((rand() * 1.0) / (RAND_MAX * 1.0)) * 2 * 3.1415926535;
theta = ((rand() * 1.0) / (RAND_MAX * 1.0)) * 2 * 3.1415926535;
r_param = ((rand() * 1.0) / (RAND_MAX * 1.0)) * R2;
X[counter] = R1 * cos(fi) + r_param * cos(theta) * cos(fi);
Y[counter] = R1 * sin(fi) + r_param * cos(theta) * sin(fi);
Z[counter] = r_param * sin(theta);
}
}
}
}
void init_s(float *X, float *Y, float *Z, float *VX, float *VY, float *VZ, int num)
{
for (int i = 0; i < num; ++i)
{
VX[i] = 0;
VY[i] = 0;
VZ[i] = 0;
}
float const R1 = 4.0f;
int xyRadialNum = num / 128;
int zRadialNum = num / 256;
int rRadialNum = (num / xyRadialNum) / zRadialNum;
float fi, theta, r_param;
int counter = 0;
for (int xy = 0; xy < xyRadialNum; ++xy)
{
for (int z = 0; z < zRadialNum; ++z)
{
for (int r = 0; ((r < rRadialNum) && (counter < num)); ++r, ++counter)
{
fi = ((rand() * 1.0) / (RAND_MAX * 1.0)) * 2 * 3.1415926535;
theta = ((rand() * 1.0) / (RAND_MAX * 1.0)) * 2 * 3.1415926535;
r_param = ((rand() * 1.0) / (RAND_MAX * 1.0)) * R1;
X[counter] = r_param * cos(fi) * cos(theta);
Y[counter] = r_param * sin(fi) * cos(theta);
Z[counter] = r_param * sin(theta);
}
}
}
}
void observe(float *X, float *Y, float *Z, int num)
{
for (int i = 0; i < num; ++i)
{
std::cout << X[i] << '\t' << Y[i] << '\t' << Z[i] << std::endl;
}
std::cout << '#' << std::endl;
}
void RK4_step(float ** X, float ** Y, float ** Z, float * VX, float * VY, float * VZ,
float *devX, float * devY, float * devZ, float * devVX, float * devVY, float * devVZ,
float ** KX, float ** KY, float ** KZ, int ptclsNumm, float modelParam)
{
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i) {
KX[0][i] = 0.0f;
KY[0][i] = 0.0f;
KZ[0][i] = 0.0f;
X[1][i] = X[0][i];
Y[1][i] = Y[0][i];
Z[1][i] = Z[0][i];
}
viCalc(X[1], Y[1], Z[1], KX[0], KY[0], KZ[0], devX, devY, devZ, devVX, devVY, devVZ, ptclsNumm, modelParam);
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i) {
KX[1][i] = 0.0f;
KY[1][i] = 0.0f;
KZ[1][i] = 0.0f;
X[1][i] = X[0][i] + KX[0][i] * STEP * 0.5f;
Y[1][i] = Y[0][i] + KY[0][i] * STEP * 0.5f;
Z[1][i] = Z[0][i] + KZ[0][i] * STEP * 0.5f;
}
viCalc(X[1], Y[1], Z[1], KX[1], KY[1], KZ[1], devX, devY, devZ, devVX, devVY, devVZ, ptclsNumm, modelParam);
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i) {
KX[2][i] = 0.0f;
KY[2][i] = 0.0f;
KZ[2][i] = 0.0f;
X[1][i] = X[0][i] + KX[1][i] * STEP * 0.5f;
Y[1][i] = Y[0][i] + KY[1][i] * STEP * 0.5f;
Z[1][i] = Z[0][i] + KZ[1][i] * STEP * 0.5f;
}
viCalc(X[1], Y[1], Z[1], KX[2], KY[2], KZ[2], devX, devY, devZ, devVX, devVY, devVZ, ptclsNumm, modelParam);
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i) {
KX[3][i] = 0.0f;
KY[3][i] = 0.0f;
KZ[3][i] = 0.0f;
X[1][i] = X[0][i] + KX[2][i] * STEP;
Y[1][i] = Y[0][i] + KY[2][i] * STEP;
Z[1][i] = Z[0][i] + KZ[2][i] * STEP;
}
viCalc(X[1], Y[1], Z[1], KX[3], KY[3], KZ[3], devX, devY, devZ, devVX, devVY, devVZ, ptclsNumm, modelParam);
#pragma omp parallel for
for (int i = 0; i < ptclsNumm; ++i)
{
X[0][i] += 1.0f / 6.0f*(KX[0][i] + 2 * KX[1][i] + 2 * KX[2][i] + KX[3][i]) * STEP;
Y[0][i] += 1.0f / 6.0f*(KY[0][i] + 2 * KY[1][i] + 2 * KY[2][i] + KY[3][i]) * STEP;
Z[0][i] += 1.0f / 6.0f*(KZ[0][i] + 2 * KZ[1][i] + 2 * KZ[2][i] + KZ[3][i]) * STEP;
}
}
int main()
{
float ** KX = new float*[4];
float ** KY = new float*[4];
float ** KZ = new float*[4];
float ** X = new float*[2];
float ** Y = new float*[2];
float ** Z = new float*[2];
for (int param = 32; param < 33; param *= 2)
{
int ptclsNum = 256 * param;
float modelparam = MODEL_PARAM;
//alloc arrays on host
for (int gh = 0; gh < 4; ++gh)
{
KX[gh] = new float[ptclsNum];
KY[gh] = new float[ptclsNum];
KZ[gh] = new float[ptclsNum];
}
for (int gh = 0; gh < 2; ++gh)
{
X[gh] = new float[ptclsNum];
Y[gh] = new float[ptclsNum];
Z[gh] = new float[ptclsNum];
}
float * VX = new float[ptclsNum];
float * VY = new float[ptclsNum];
float * VZ = new float[ptclsNum];
//alloc arrays on device
float * devX, *devY, *devZ, *devVX, *devVY, *devVZ;
unsigned int array_size = sizeof(float) * ptclsNum;
cudaMalloc((void**)&devX, array_size); cudaMalloc((void**)&devY, array_size); cudaMalloc((void**)&devZ, array_size);
cudaMalloc((void**)&devVX, array_size); cudaMalloc((void**)&devVY, array_size); cudaMalloc((void**)&devVZ, array_size);
std::string path = std::to_string(param);
std::freopen((path + "_out_torus.txt").c_str(), "w", stdout);
//init conditions for host
init(X[0], Y[0], Z[0], VX, VY, VZ, ptclsNum);
for (double t = 0.0f; t < MODELINGTIME; t += STEP)
{
RK4_step(X, Y, Z, VX, VY, VZ, devX, devY, devZ, devVX, devVY, devVZ, KX, KY, KZ, ptclsNum, modelparam);
if ((int(t * 1000) % 100) == 0) observe(X[0], Y[0], Z[0], ptclsNum);
std::cerr << t << " of " << MODELINGTIME << std::endl;
}
std::freopen((path + "_out_sphe.txt").c_str(), "w", stdout);
//init conditions for host
init_s(X[0], Y[0], Z[0], VX, VY, VZ, ptclsNum);
for (double t = 0.0f; t < MODELINGTIME; t += STEP)
{
RK4_step(X, Y, Z, VX, VY, VZ, devX, devY, devZ, devVX, devVY, devVZ, KX, KY, KZ, ptclsNum, modelparam);
if ((int(t * 1000) % 100) == 0) observe(X[0], Y[0], Z[0], ptclsNum);
std::cerr << t << " of " << MODELINGTIME << std::endl;
}
for (int gh = 0; gh < 4; ++gh)
{
delete [] KX[gh];
delete [] KY[gh];
delete [] KZ[gh];
}
for (int gh = 0; gh < 2; ++gh)
{
delete[] X[gh];
delete[] Y[gh];
delete[] Z[gh];
}
delete[] VX;
delete[] VY;
delete[] VZ;
cudaFree(devX);
cudaFree(devY);
cudaFree(devZ);
cudaFree(devVX);
cudaFree(devVY);
cudaFree(devVZ);
}
return 0;
}
|
251c51035b609c7e09777ee69df9acf59ebd319f.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example adapts example 16 to use 3xTF32, bringing FP32 accuracy at roughly 2x the
performance of CUDA Cores. See example 27 for the trick of 3xTF32.
*/
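/*
Sketch of the 3xTF32 idea (see example 27 for the full derivation): each FP32
operand is split into a "big" TF32 part and a "small" TF32 correction,
x = x_big + x_small, and the product is accumulated as
    x*y ~= x_big*y_big + x_big*y_small + x_small*y_big,
i.e. three TF32 Tensor Core MMAs per FP32 multiply (the small*small term is
dropped), which recovers near-FP32 accuracy at TF32 throughput.
*/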
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = float; // Data type of elements in input tensor
using ElementInputB = float; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
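// (NumStages sets the depth of the shared-memory software pipeline in the Ampere
// multistage mainloop: cp.async copies for upcoming tiles overlap with the
// Tensor Core math for the current tile.)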
// This code section describes whether the selected iterator algorithm is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
// 3xTF32 Fprop
using Conv2dFpropKernel_3xTF32 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
// Only thing needs to be changed from normal Fprop
cutlass::arch::OpMultiplyAddFastF32,
IteratorAlgorithm
>::Kernel;
// 1xTF32 Fprop
using Conv2dFpropKernel_1xTF32 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm_3xTF32 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel_3xTF32>;
using ImplicitGemm_1xTF32 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel_1xTF32>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
// CUTLASS attempts to load 128b vectors of float (F32) elements. Consequently,
// all pointers, strides, and tensor extents must be divisible by 4 elements.
//
int const kAlignment = 4;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "28_ampere_3xtf32_fast_accurate_tensorop_fprop example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n <int> Input tensor extent N\n"
<< " --h <int> Input tensor extent H\n"
<< " --w <int> Input tensor extent W\n"
<< " --c <int> Input tensor extent C\n"
<< " --k <int> Filter extent K\n"
<< " --r <int> Filter extent R\n"
<< " --s <int> Filter extent S\n\n"
<< " --alpha <float> Epilogue scalar alpha\n"
<< " --beta <float> Epilogue scalar beta\n\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations <int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag <string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/28_ampere_3xtf32_fast_accurate_tensorop_fprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/28_ampere_3xtf32_fast_accurate_tensorop_fprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
hipError_t error;
double l2_norm_3xtf32_vs_fp64;
double l2_norm_1xtf32_vs_fp64;
double l2_norm_fp32_vs_fp64;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
error(hipSuccess),
l2_norm_3xtf32_vs_fp64(0),
l2_norm_1xtf32_vs_fp64(0),
l2_norm_fp32_vs_fp64(0) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops << ","
<< l2_norm_3xtf32_vs_fp64 << ","
<< l2_norm_1xtf32_vs_fp64 << ","
<< l2_norm_fp32_vs_fp64;
return out;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
////////////////////////////////////////////////////////////////////////////////
/// 1. Initialize F32 Precision input tensors using CUTLASS helper functions
////////////////////////////////////////////////////////////////////////////////
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<float, LayoutInputA> tensor_a_F32(options.input_size);
cutlass::HostTensor<float, LayoutInputB> tensor_b_F32(options.filter_size);
cutlass::HostTensor<float, LayoutOutput> tensor_c_F32(options.output_size());
cutlass::HostTensor<float, LayoutOutput> tensor_d_F32(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_F32.host_view(),
1,
ElementInputA(7),
ElementInputA(-8));
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_F32.host_view(),
1,
ElementInputB(7),
ElementInputB(-8));
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_F32.host_view(),
1,
ElementInputB(7),
ElementInputB(-8));
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d_F32.host_view());
// Copy data from host to GPU
tensor_a_F32.sync_device();
tensor_b_F32.sync_device();
tensor_c_F32.sync_device();
tensor_d_F32.sync_device();
////////////////////////////////////////////////////////////////////////////////
/// 2. Initialize F32 Precision input tensors using CUTLASS helper functions
////////////////////////////////////////////////////////////////////////////////
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<double, LayoutInputA> tensor_a_F64(options.input_size);
cutlass::HostTensor<double, LayoutInputB> tensor_b_F64(options.filter_size);
cutlass::HostTensor<double, LayoutOutput> tensor_c_F64(options.output_size());
cutlass::HostTensor<double, LayoutOutput> tensor_d_F64(options.output_size());
cutlass::HostTensor<float, LayoutOutput> tensor_d_3xTF32(options.output_size());
cutlass::HostTensor<float, LayoutOutput> tensor_d_1xTF32(options.output_size());
// Copy values from the DP tensors
cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view());
// Copy data from host to GPU
tensor_a_F64.sync_device();
tensor_b_F64.sync_device();
tensor_c_F64.sync_device();
tensor_d_F64.sync_device();
tensor_d_3xTF32.sync_device();
tensor_d_1xTF32.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
////////////////////////////////////////////////////////////////////////////////
/// 3. Run 3xTF32 kernel within a profiling loop
////////////////////////////////////////////////////////////////////////////////
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm_3xTF32::Arguments arguments_3xTF32{
problem_size,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
tensor_c_F32.device_ref(),
tensor_d_3xTF32.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm_3xTF32 implicit_gemm_op_3xTF32;
size_t workspace_size_3xTF32 = implicit_gemm_op_3xTF32.get_workspace_size(arguments_3xTF32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_3xTF32(workspace_size_3xTF32);
result.status = implicit_gemm_op_3xTF32.can_implement(arguments_3xTF32);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op_3xTF32.initialize(arguments_3xTF32, workspace_3xTF32.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op_3xTF32();
CUTLASS_CHECK(result.status);
//
// Performance measurement
//
hipEvent_t events[2];
for (auto & event : events) {
result.error = hipEventCreate(&event);
if (result.error != hipSuccess) {
std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = hipEventRecord(events[0]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op_3xTF32();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = hipEventRecord(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = hipEventSynchronize(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != hipSuccess) {
std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)hipEventDestroy(event);
}
tensor_d_3xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/// 4. Run 1xTF32 kernel within a profiling loop
////////////////////////////////////////////////////////////////////////////////
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm_1xTF32::Arguments arguments_1xTF32{
problem_size,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
tensor_c_F32.device_ref(),
tensor_d_1xTF32.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm_1xTF32 implicit_gemm_op_1xTF32;
size_t workspace_size_1xTF32 = implicit_gemm_op_1xTF32.get_workspace_size(arguments_1xTF32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_1xTF32(workspace_size_1xTF32);
result.status = implicit_gemm_op_1xTF32.can_implement(arguments_1xTF32);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op_1xTF32.initialize(arguments_1xTF32, workspace_1xTF32.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op_1xTF32();
CUTLASS_CHECK(result.status);
tensor_d_1xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F64)
////////////////////////////////////////////////////////////////////////////////
cutlass::reference::device::Conv2d<
double,
LayoutInputA,
double,
LayoutInputB,
double,
LayoutOutput,
double,
double
>(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a_F64.device_ref(),
tensor_b_F64.device_ref(),
tensor_c_F64.device_ref(),
tensor_d_F64.device_ref(),
options.alpha,
options.beta);
// Wait for kernels to finish
hipDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F64.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F32)
////////////////////////////////////////////////////////////////////////////////
cutlass::reference::device::Conv2d<
float,
LayoutInputA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
float
>(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
tensor_c_F32.device_ref(),
tensor_d_F32.device_ref(),
options.alpha,
options.beta);
// Wait for kernels to finish
hipDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/////// Compute l2 norms
////////////////////////////////////////////////////////////////////////////////
// l2 norm 3xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_3xTF32_in_F64(options.output_size());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view());
result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm 1xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_1xTF32_in_F64(options.output_size());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view());
result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm F32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_F32_in_F64(options.output_size());
cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view());
result.l2_norm_fp32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view());
///////////////////////////////////////////////////////////////////////////////
if (options.save_workspace) {
std::stringstream ss;
ss << "28_ampere_3xtf32_fast_accurate_tensorop_fprop_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a_F32.host_view() << "\n\n"
<< "Filters = \n" << tensor_b_F32.host_view() << "\n\n";
output_workspace << "TF32x3 = \n" << tensor_d_3xTF32.host_view() << std::endl;
output_workspace << "TF32x1 = \n" << tensor_d_1xTF32.host_view() << std::endl;
output_workspace << "FP32 = \n" << tensor_d_F32.host_view() << std::endl;
output_workspace << "FP64 = \n" << tensor_d_F64.host_view() << "\n\n";
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
CUDA_CHECK(hipGetDeviceProperties(&props, 0));
if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 251c51035b609c7e09777ee69df9acf59ebd319f.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example adopts example 16 to use 3xTF32 to bring FP32 accuracy with 2x performance
compared with CUDA Cores. See example 27 for the trick of 3xTF32.
*/
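// Editorial sketch (not part of the original example): the 3xTF32 idea, roughly.
// Each FP32 operand is split into a "big" TF32 value and a "small" TF32 residual,
// and the product is rebuilt from three TF32 Tensor Core multiplies:
//   a_big = tf32(a);  a_small = tf32(a - a_big);   (same for b)
//   a * b ~= a_big*b_big + a_big*b_small + a_small*b_big   // small*small is dropped
// This is what cutlass::arch::OpMultiplyAddFastF32 selects below; see example 27
// for the authoritative GEMM version of the trick.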
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/host/convolution.h"
#include "cutlass/util/reference/host/error_metrics.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta)
using ElementInputA = float; // Data type of elements in input tensor
using ElementInputB = float; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 16>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 32, 16>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 3;
// This code section describe iterator algorithm selected is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue>; // Data type for alpha/beta in linear combination
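// Editorial note: LinearCombination computes D = alpha * accumulator + beta * C;
// with ElementOutput = float this gives 128 / 32 = 4 elements per vectorized access.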
// 3xTF32 Fprop
using Conv2dFpropKernel_3xTF32 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
// Only thing needs to be changed from normal Fprop
cutlass::arch::OpMultiplyAddFastF32,
IteratorAlgorithm
>::Kernel;
// 1xTF32 Fprop
using Conv2dFpropKernel_1xTF32 = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm_3xTF32 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel_3xTF32>;
using ImplicitGemm_1xTF32 = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel_1xTF32>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
int iterations;
bool save_workspace;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
bool benchmark;
std::string tag;
Options():
help(false),
input_size(1, 32, 32, 32),
filter_size(32, 3, 3, 32),
padding(1, 1, 1, 1),
conv_stride(1, 1),
dilation(1, 1),
iterations(20),
save_workspace(false),
alpha(1),
beta(0),
benchmark(false) { }
// Verify the problem size is compatible with the CUTLASS Convolution implementation.
bool valid() {
//
    //   CUTLASS attempts to load 128b vectors of float (F32) elements. Consequently,
    //   all pointers, strides, and tensor extents must be divisible by 4 elements.
//
int const kAlignment = 4;
if ((input_size.c() % kAlignment) ||
(filter_size.n() % kAlignment)) {
// misaligned tensors
return false;
}
// Invalid padding
if ((padding.h() != filter_size.h() / 2) ||
(padding.w() != filter_size.w() / 2)) {
return false;
}
return true;
}
/// Updates input and filter sizes
void update(
cutlass::Tensor4DCoord input_size,
cutlass::Tensor4DCoord filter_size) {
this->input_size = input_size;
this->filter_size = filter_size;
padding.n() = filter_size.h() / 2;
padding.h() = filter_size.h() / 2;
padding.w() = filter_size.w() / 2;
padding.c() = filter_size.w() / 2;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
if (cmd.check_cmd_line_flag("save-workspace")) {
save_workspace = true;
}
if (cmd.check_cmd_line_flag("benchmark")) {
benchmark = true;
}
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
filter_size.c() = input_size.c();
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
cmd.get_cmd_line_argument("tag", tag);
if (filter_size.h() == 3 && filter_size.w() == 3) {
padding = {1, 1, 1, 1};
}
else {
filter_size.h() = 1;
filter_size.w() = 1;
padding = {0, 0, 0, 0};
}
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "28_ampere_3xtf32_fast_accurate_tensorop_fprop example\n\n"
<< " This example uses Ampere's Tensor Core operators on F16 data types to compute\n"
<< " forward convolution on tensors of layout NHWC.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --n <int> Input tensor extent N\n"
<< " --h <int> Input tensor extent H\n"
<< " --w <int> Input tensor extent W\n"
<< " --c <int> Input tensor extent C\n"
<< " --k <int> Filter extent K\n"
<< " --r <int> Filter extent R\n"
<< " --s <int> Filter extent S\n\n"
<< " --alpha <float> Epilogue scalar alpha\n"
<< " --beta <float> Epilogue scalar beta\n\n"
<< " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n"
<< " --iterations <int> Number of profiling iterations to perform.\n"
<< " --save-workspace If set, workspace is written to a text file.\n"
<< " --tag <string> String to replicate across the first column in the results table\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/28_ampere_3xtf32_fast_accurate_tensorop_fprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n"
<< "$ ./examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/28_ampere_3xtf32_fast_accurate_tensorop_fprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n";
return out;
}
/// Computes the output tensor size (NPQK)
cutlass::Tensor4DCoord output_size() const {
return cutlass::Tensor4DCoord(
input_size.n(),
(input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1,
(input_size.w() + padding.w() + padding.c() - filter_size.w()) / conv_stride.column() + 1,
filter_size.n());
}
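  // Worked example (editorial): with the defaults above (input 1x32x32x32,
  // filter 32x3x3x32, padding 1x1x1x1, stride 1x1) the output is
  //   P = (32 + 1 + 1 - 3) / 1 + 1 = 32,  Q = 32,  i.e. NPQK = 1x32x32x32.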
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of multiply-adds = NPQK * CRS
int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c());
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
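  // Worked example (editorial): for the default 1x32x32x32 / 32x3x3x32 problem,
  // fmas = (1*32*32*32) * (3*3*32) = 9,437,184, so ~0.0189 GFLOP per convolution.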
};
/////////////////////////////////////////////////////////////////////////////////////////////////
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
double l2_norm_3xtf32_vs_fp64;
double l2_norm_1xtf32_vs_fp64;
double l2_norm_fp32_vs_fp64;
Result():
runtime_ms(0),
gflops(0),
status(cutlass::Status::kSuccess),
error(cudaSuccess),
l2_norm_3xtf32_vs_fp64(0),
l2_norm_1xtf32_vs_fp64(0),
l2_norm_fp32_vs_fp64(0) { }
static std::ostream & print_header(std::ostream &out, Options const &options) {
if (!options.tag.empty()) {
out << "Name,";
}
out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs,3xTF32_vs_FP64,1xTF32_vs_FP64,FP32_vs_FP64";
return out;
}
std::ostream & print(std::ostream &out, int idx, Options const &options) {
if (!options.tag.empty()) {
out << options.tag << ",";
}
out
<< "conv_" << idx << ","
<< options.input_size.n() << ","
<< options.input_size.h() << ","
<< options.input_size.w() << ","
<< options.input_size.c() << ","
<< options.filter_size.n() << ","
<< options.filter_size.h() << ","
<< options.filter_size.w() << ","
<< runtime_ms << ","
<< gflops << ","
<< l2_norm_3xtf32_vs_fp64 << ","
<< l2_norm_1xtf32_vs_fp64 << ","
<< l2_norm_fp32_vs_fp64;
return out;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Runs one benchmark
Result profile_convolution(Options const &options) {
Result result;
////////////////////////////////////////////////////////////////////////////////
/// 1. Initialize F32 Precision input tensors using CUTLASS helper functions
////////////////////////////////////////////////////////////////////////////////
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<float, LayoutInputA> tensor_a_F32(options.input_size);
cutlass::HostTensor<float, LayoutInputB> tensor_b_F32(options.filter_size);
cutlass::HostTensor<float, LayoutOutput> tensor_c_F32(options.output_size());
cutlass::HostTensor<float, LayoutOutput> tensor_d_F32(options.output_size());
//
// Initialize tensors
//
// Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_a_F32.host_view(),
1,
ElementInputA(7),
ElementInputA(-8));
// Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b_F32.host_view(),
1,
ElementInputB(7),
ElementInputB(-8));
// Fill tensor C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_F32.host_view(),
1,
ElementInputB(7),
ElementInputB(-8));
// Fill tensor D on host with zeros
cutlass::reference::host::TensorFill(
tensor_d_F32.host_view());
// Copy data from host to GPU
tensor_a_F32.sync_device();
tensor_b_F32.sync_device();
tensor_c_F32.sync_device();
tensor_d_F32.sync_device();
////////////////////////////////////////////////////////////////////////////////
/// 2. Initialize F32 Precision input tensors using CUTLASS helper functions
////////////////////////////////////////////////////////////////////////////////
//
// Allocate host-device tensors using the CUTLASS Utilities.
//
cutlass::HostTensor<double, LayoutInputA> tensor_a_F64(options.input_size);
cutlass::HostTensor<double, LayoutInputB> tensor_b_F64(options.filter_size);
cutlass::HostTensor<double, LayoutOutput> tensor_c_F64(options.output_size());
cutlass::HostTensor<double, LayoutOutput> tensor_d_F64(options.output_size());
cutlass::HostTensor<float, LayoutOutput> tensor_d_3xTF32(options.output_size());
cutlass::HostTensor<float, LayoutOutput> tensor_d_1xTF32(options.output_size());
// Copy values from the DP tensors
cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view());
// Copy data from host to GPU
tensor_a_F64.sync_device();
tensor_b_F64.sync_device();
tensor_c_F64.sync_device();
tensor_d_F64.sync_device();
tensor_d_3xTF32.sync_device();
tensor_d_1xTF32.sync_device();
//
// Define arguments for CUTLASS Convolution
//
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation;
  // Split K dimension into 1 partition
int split_k_slices = 1;
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
mode,
split_k_slices
);
////////////////////////////////////////////////////////////////////////////////
/// 3. Run 3xTF32 kernel within a profiling loop
////////////////////////////////////////////////////////////////////////////////
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm_3xTF32::Arguments arguments_3xTF32{
problem_size,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
tensor_c_F32.device_ref(),
tensor_d_3xTF32.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm_3xTF32 implicit_gemm_op_3xTF32;
size_t workspace_size_3xTF32 = implicit_gemm_op_3xTF32.get_workspace_size(arguments_3xTF32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_3xTF32(workspace_size_3xTF32);
result.status = implicit_gemm_op_3xTF32.can_implement(arguments_3xTF32);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op_3xTF32.initialize(arguments_3xTF32, workspace_3xTF32.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op_3xTF32();
CUTLASS_CHECK(result.status);
//
// Performance measurement
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of convolution operations.
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Launch a sequence of implicit GEMM operations on the device
for (int iteration = 0; iteration < options.iterations; ++iteration) {
result.status = implicit_gemm_op_3xTF32();
CUTLASS_CHECK(result.status);
}
// Record an event when the convolutions have been launched.
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Print average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
tensor_d_3xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/// 4. Run 1xTF32 kernel within a profiling loop
////////////////////////////////////////////////////////////////////////////////
// Construct ImplicitGemm::Argument structure with conv2d
// problem size, data pointers, and epilogue values
typename ImplicitGemm_1xTF32::Arguments arguments_1xTF32{
problem_size,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
tensor_c_F32.device_ref(),
tensor_d_1xTF32.device_ref(),
{options.alpha, options.beta},
};
//
// Initialize CUTLASS Convolution
//
ImplicitGemm_1xTF32 implicit_gemm_op_1xTF32;
size_t workspace_size_1xTF32 = implicit_gemm_op_1xTF32.get_workspace_size(arguments_1xTF32);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace_1xTF32(workspace_size_1xTF32);
result.status = implicit_gemm_op_1xTF32.can_implement(arguments_1xTF32);
CUTLASS_CHECK(result.status);
result.status = implicit_gemm_op_1xTF32.initialize(arguments_1xTF32, workspace_1xTF32.get());
CUTLASS_CHECK(result.status);
//
// Launch initialized CUTLASS kernel
//
result.status = implicit_gemm_op_1xTF32();
CUTLASS_CHECK(result.status);
tensor_d_1xTF32.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F64)
////////////////////////////////////////////////////////////////////////////////
cutlass::reference::device::Conv2d<
double,
LayoutInputA,
double,
LayoutInputB,
double,
LayoutOutput,
double,
double
>(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a_F64.device_ref(),
tensor_b_F64.device_ref(),
tensor_c_F64.device_ref(),
tensor_d_F64.device_ref(),
options.alpha,
options.beta);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F64.sync_host();
////////////////////////////////////////////////////////////////////////////////
// Run reference kernel (F32)
////////////////////////////////////////////////////////////////////////////////
cutlass::reference::device::Conv2d<
float,
LayoutInputA,
float,
LayoutInputB,
float,
LayoutOutput,
float,
float
>(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a_F32.device_ref(),
tensor_b_F32.device_ref(),
tensor_c_F32.device_ref(),
tensor_d_F32.device_ref(),
options.alpha,
options.beta);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_F32.sync_host();
////////////////////////////////////////////////////////////////////////////////
/////// Compute l2 norms
////////////////////////////////////////////////////////////////////////////////
// l2 norm 3xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_3xTF32_in_F64(options.output_size());
cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view());
result.l2_norm_3xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm 1xTF32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_1xTF32_in_F64(options.output_size());
cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view());
result.l2_norm_1xtf32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view());
// l2 norm F32 vs F64
cutlass::HostTensor<double, LayoutOutput> tensor_d_F32_in_F64(options.output_size());
cutlass::reference::host::TensorCopy(tensor_d_F32_in_F64.host_view(), tensor_d_F32.host_view());
result.l2_norm_fp32_vs_fp64 = cutlass::reference::host::TensorRelativeErrorMetric(
tensor_d_F32_in_F64.host_view(), tensor_d_F64.host_view());
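  // Editorial note: TensorRelativeErrorMetric (from error_metrics.h included above)
  // reports a relative l2-style error, roughly the norm of (a - b) scaled by the
  // norm of the reference; see cutlass/util/reference/host/error_metrics.h for the
  // exact definition.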
///////////////////////////////////////////////////////////////////////////////
if (options.save_workspace) {
std::stringstream ss;
ss << "28_ampere_3xtf32_fast_accurate_tensorop_fprop_"
<< options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c()
<< "_"
<< options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c()
<< ".dat";
std::ofstream output_workspace(ss.str());
output_workspace
<< "Input = \n" << tensor_a_F32.host_view() << "\n\n"
<< "Filters = \n" << tensor_b_F32.host_view() << "\n\n";
output_workspace << "TF32x3 = \n" << tensor_d_3xTF32.host_view() << std::endl;
output_workspace << "TF32x1 = \n" << tensor_d_1xTF32.host_view() << std::endl;
output_workspace << "FP32 = \n" << tensor_d_F32.host_view() << std::endl;
output_workspace << "FP64 = \n" << tensor_d_F64.host_view() << "\n\n";
std::cout << "Results written to '" << ss.str() << "'." << std::endl;
}
return result;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
if (options.benchmark) {
// Benchmark several layers
int batch_sizes[] = {1, 32, 64, 128, 256};
struct Benchmark {
int h, w, c, k, r, s;
} layers[] = {
{56, 56, 64, 256, 1, 1},
{56, 56, 64, 64, 1, 1},
{56, 56, 64, 64, 3, 3},
{56, 56, 256, 64, 1, 1},
{56, 56, 256, 512, 1, 1},
{56, 56, 256, 128, 1, 1},
{28, 28, 128, 128, 3, 3},
{28, 28, 128, 512, 1, 1},
{28, 28, 512, 128, 1, 1},
{28, 28, 512, 1024, 1, 1},
{28, 28, 512, 256, 1, 1},
{14, 14, 256, 256, 3, 3},
{14, 14, 256, 1024, 1, 1},
{14, 14, 1024, 256, 1, 1},
{14, 14, 1024, 2048, 1, 1},
{14, 14, 1024, 512, 1, 1},
{7, 7, 512, 512, 3, 3},
};
Result::print_header(std::cout, options) << std::endl;
int idx = 1;
for (auto const &layer : layers) {
for (auto N : batch_sizes) {
options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c});
Result result = profile_convolution(options);
result.print(std::cout, idx, options) << std::endl;
}
++idx;
}
}
else {
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
Result result = profile_convolution(options);
Result::print_header(std::cout, options) << std::endl;
result.print(std::cout, 1, options) << std::endl;
}
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
cc0f0b85ad165eba8c3b99b3c0cbccce3b39c1ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../GenerateSquares.h"
#include <stdio.h>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__ void permute_rows(short* new_rows, short* values, short* newSquares,
int order, int rowOffset, int myOffset)
{
for(short i = 0; i < order; i++)
{
for(short j = 0; j < order; j++)
{
newSquares[(i * order) + myOffset + rowOffset + j] = values[new_rows[i] * order + j];
}
}
}
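// Editorial example: for order 3 and new_rows = {2, 0, 1}, output row 0 is input
// row 2, output row 1 is input row 0, and output row 2 is input row 1 (the offsets
// select this thread's slot inside newSquares).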
__device__ void permute_cols(short* new_cols, short* values, short* newSquares,
int order, int colOffset, int myOffset)
{
for(short i = 0; i < order; i++)
{
for(short j = 0; j < order; j++)
{
newSquares[(i + myOffset + colOffset) + (j * order)] = values[j * order + new_cols[i]];
}
}
}
__device__ void permute_symbols(short* syms, short* values, short* newSquares,
int order, int symOffset, int myOffset)
{
short osq = order*order;
for(short i = 0; i < osq; i++)
{
newSquares[i + myOffset + symOffset] = syms[values[i]];
}
}
__global__ void generate_squares(short* squareList, int order, short* newSquares,
short* perms, int batchSize, int totalPerms)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < batchSize)
{
int osq = order*order;
int myOffset = idx * totalPerms * osq * 3; // where in the new square list is this thread's data?
int squareOffset = idx * osq; // where in the squares list is this thread's data?
// where after the offset to we start storing the data in the new square list
int rowOffset = 0;
int colOffset = osq;
int symOffset = 2*(osq);
short* my_square = (short*)malloc(sizeof(short) * osq); // add squareOffset to function calls
short* perm = (short*)malloc(sizeof(short)*order);
for(int i = 0; i < osq; i++)
{
my_square[i] = squareList[i + squareOffset];
}
for(int pCount = 0; pCount < totalPerms; pCount++)
{
for(int i = 0; i < order; i++)
{
perm[i] = perms[(pCount*order) + i];
}
permute_cols(perm, my_square, newSquares, order, colOffset, myOffset);
permute_rows(perm, my_square, newSquares, order, rowOffset, myOffset);
permute_symbols(perm, my_square, newSquares, order, symOffset, myOffset);
myOffset += (osq*3);
}
        free(my_square); //!!!! ALWAYS FREE YOUR MEMORY !!!! (malloc'd above, so free(), not delete[])
        free(perm);
}
}
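// Editorial note on the output layout: each thread owns a contiguous region of
// totalPerms * 3 * osq shorts in newSquares; for every permutation it stores the
// row-permuted square at offset 0, the column-permuted square at offset osq, and
// the symbol-permuted square at offset 2*osq of its current osq*3 slot, then the
// slot advances by osq*3.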
void run_on_gpu(short* squaresToRun, int order, short* newSquares, short* perm,
int squareArraySize, int permArraySize, int newSquareArraySize,
int squaresToCheck, int totalPerms, short *dev_squares, short* dev_perm, short* dev_new_squares)
{
hipMemcpy(dev_squares, squaresToRun, squareArraySize, hipMemcpyHostToDevice);
hipMemcpy(dev_perm, perm, permArraySize, hipMemcpyHostToDevice);
hipMemcpy(dev_new_squares, newSquares, newSquareArraySize, hipMemcpyHostToDevice);
// how many blocks do we need if we use nThreads threads?
int nThreads = 128;
int nBlocks = (squareArraySize + nThreads - 1) / nThreads;
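    // Editorial note: squareArraySize is a byte count, so this launches many more
    // blocks than the squaresToCheck squares need; the idx < batchSize guard in the
    // kernel keeps the surplus threads idle, so the result is still correct.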
hipLaunchKernelGGL(( generate_squares), dim3(nBlocks), dim3(nThreads), 0, 0, dev_squares, order, dev_new_squares, dev_perm, squaresToCheck, totalPerms);
hipMemcpy(newSquares, dev_new_squares, newSquareArraySize, hipMemcpyDeviceToHost);
//gpuErrchk(hipPeekAtLastError());
//gpuErrchk(hipMemcpy(newSquares, dev_new_squares, newSquareArraySize, hipMemcpyDeviceToHost));
//gpuErrchk(hipPeekAtLastError());
//gpuErrchk(hipDeviceSynchronize());
}
void copy_to_vectors(short* newSquares, unordered_set<string> &appendToSquares,
int numberSquares, int order, int totalPerms)
{
int osq = order*order;
long iterRange = numberSquares*totalPerms*3*osq;
for(long i = 0; i < iterRange; i+=osq)
{
        //TODO: create a hash function for matrices; store hashes rather than squares
//TODO: can then compare hash values to see if a square has been generated
//TODO: squares can then be written to a file on a separate thread
//TODO: (or multiple files to be combined at the end).
// heap or stack? should this be created and destroyed dynamically?
        // ans: needs to be dynamic to be able to call normalize/reduce
short* values = new short[osq];
memcpy(values, &newSquares[i], osq*sizeof(short));
LatinSquare sq = LatinSquare(order, values);
//sq.reduce();
sq.normalize();
appendToSquares.insert(sq.flatstring_no_space());
// NOTES! adding only the normalized or reduced squares produces the
// correct results (order 5 = 56 normalized and 1344 reduced)
//delete[] values;
}
}
int main(int argc, char* argv[])
{
// timers
clock_t start, end;
start = clock();
// TODO: make some things globals (e.g. order, osq) to stop passing it around
if (argc < 3)
{
print_usage();
return 0;
}
short order = stoi(string(argv[1]));
short osq = order*order;
string filename_iso = string(argv[2]);
string filename_3 = "3_perm.dat";
string filename_n = to_string(order) + "_perm.dat";
bool cont = true;
if (!file_exists(filename_n))
{
cout << filename_n << " does not exist. Please use the utilites to generate the file." << endl;
cont = false;
}
if(!file_exists(filename_iso))
{
cout << filename_iso << " does not exist." << endl;
cont = false;
}
if (!cont)
return 0;
ifstream isofile; isofile.open(filename_iso);
string line;
// unordered_map<string, LatinSquare> allSqs;
unordered_set<string> allSqs; // TODO: hash function to take this from strings -> doubles
    vector<short*> checkSqs; // squares to permute, do not permute all squares every time
while(getline(isofile, line))
{
LatinSquare isoSq(order, get_array_from_line(line, osq));
// allSqs.insert(make_pair(isoSq.flatstring_no_space(), isoSq));
allSqs.insert(isoSq.flatstring_no_space());
checkSqs.push_back(isoSq.get_values());
}
isofile.close();
long totalPerms = my_factorial(order);
short* perms = (short*)malloc(sizeof(short) * totalPerms * order);
vector<short*> permVec;
ifstream permfile; permfile.open(filename_n);
string permline;
int count = 0;
while(getline(permfile, permline))
{
short* permArr = get_array_from_line(permline, order);
permVec.push_back(permArr);
for(int i = 0; i < order; i++)
{
perms[(count*order) + i] = permArr[i];
}
count++;
}
permfile.close();
start = clock();
// some random value (maybe keep it divisible by nThreads which should be a multiple of 32)
int maxBatchSize = 768;
// 1050ti = 768
// 2080ti = 4352; // number of cores? (sure, although it might eat ram)
long unsigned int numSqs;
int permArraySize = order * sizeof(short) * totalPerms;
int lastSquaresToCheck = 0;
short* dev_squares; short* dev_perm; short* dev_new_squares;
do {
numSqs = allSqs.size();
unordered_set<string> newSqMap;
// TODO: add a permutation batch
// TODO: permute one isotopy class representative at a time -> write to file -> permute next iso rep (this generates all squares in iso class)
/* START: Process each batch of 'maxBatchSize' squares */
int checkedSquares = 0;
int reportCount = 0;
while(checkedSquares < checkSqs.size())
{
if(checkedSquares / (SQ_CHECK_REPORT) > reportCount && checkedSquares > 0)
{
printf("Checked %d out of %ld squares\n", checkedSquares, checkSqs.size());
reportCount++;
}
// only process up to maxBatchSize, in batches, to conserve RAM
int squaresToCheck = (checkSqs.size() - checkedSquares) > maxBatchSize
? maxBatchSize : (checkSqs.size() - checkedSquares);
int squareArraySize = squaresToCheck * osq * sizeof(short);
int newSquareArraySize = squareArraySize * totalPerms * 3;
short* squares = (short*)malloc(squareArraySize);
short* newSquares = (short*)malloc(newSquareArraySize);
/* START: flatten out the checkSqs set */
for(int i = 0; i < squaresToCheck; i++) // each square
{
// start at the last index (ignore first squares that have been checked)
short* values = checkSqs.at(checkedSquares + i);
for(int j = 0; j < osq; j++)
{
squares[(i*osq) + j] = values[j];
}
}
/* END: flatten out the checkSqs set */
if(lastSquaresToCheck != squaresToCheck)
{
if(lastSquaresToCheck > 0)
{
hipFree(dev_squares);
hipFree(dev_perm);
hipFree(dev_new_squares);
}
hipMalloc((void**)&dev_squares, squareArraySize);
hipMalloc((void**)&dev_perm, permArraySize);
hipMalloc((void**)&dev_new_squares, newSquareArraySize);
}
run_on_gpu(squares, order, newSquares, perms, squareArraySize,
permArraySize, newSquareArraySize, squaresToCheck, totalPerms,
dev_squares, dev_perm, dev_new_squares);
// need to store newSqMap here instead so that we can only add
// new unique squares to the checkSqs vector
cout << "BEFORE copy_to_vectors: " << allSqs.size() << " " << checkSqs.size() << " " << newSqMap.size() << endl;
copy_to_vectors(newSquares, newSqMap, squaresToCheck, order, totalPerms);
cout << "AFTER copy_to_vectors: " << allSqs.size() << " " << checkSqs.size() << " " << newSqMap.size() << endl;
checkedSquares += squaresToCheck;
lastSquaresToCheck = squaresToCheck;
delete[] squares;
delete[] newSquares;
}
/* END: Process each batch of 'maxBatchSize' squares */
checkSqs.clear();
pair<unordered_set<string>::iterator, bool> returnValue;
for(auto it = newSqMap.begin(); it != newSqMap.end(); it++)
{
string lsString = (*it);
returnValue = allSqs.insert(lsString);
if(returnValue.second) // the LS was added to the set so add it to the checksqs vector
{
short* values = get_array_from_line(lsString, osq);
checkSqs.push_back(values);
}
}
newSqMap.clear();
cout << "Start Count: " << numSqs << ", End Count: " << allSqs.size() << endl;
} while(numSqs < allSqs.size());
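    // Editorial note: the loop above repeats until a full pass adds no new
    // normalized squares to allSqs, i.e. until the set is closed under all the
    // row/column/symbol permutations read from the permutation file.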
end = clock();
double timeTaken = double(end-start) / double(CLOCKS_PER_SEC);
cout << "CUDA Time Taken: " << timeTaken << " seconds" << endl;
ofstream sqfile; sqfile.open(to_string(order) + "_squares.dat");
for(auto it = allSqs.begin(); it != allSqs.end(); it++)
{
sqfile << (*it) << endl;
}
sqfile.close();
return 0;
}
| cc0f0b85ad165eba8c3b99b3c0cbccce3b39c1ab.cu | #include "../GenerateSquares.h"
#include <stdio.h>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__ void permute_rows(short* new_rows, short* values, short* newSquares,
int order, int rowOffset, int myOffset)
{
for(short i = 0; i < order; i++)
{
for(short j = 0; j < order; j++)
{
newSquares[(i * order) + myOffset + rowOffset + j] = values[new_rows[i] * order + j];
}
}
}
__device__ void permute_cols(short* new_cols, short* values, short* newSquares,
int order, int colOffset, int myOffset)
{
for(short i = 0; i < order; i++)
{
for(short j = 0; j < order; j++)
{
newSquares[(i + myOffset + colOffset) + (j * order)] = values[j * order + new_cols[i]];
}
}
}
__device__ void permute_symbols(short* syms, short* values, short* newSquares,
int order, int symOffset, int myOffset)
{
short osq = order*order;
for(short i = 0; i < osq; i++)
{
newSquares[i + myOffset + symOffset] = syms[values[i]];
}
}
__global__ void generate_squares(short* squareList, int order, short* newSquares,
short* perms, int batchSize, int totalPerms)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < batchSize)
{
int osq = order*order;
int myOffset = idx * totalPerms * osq * 3; // where in the new square list is this thread's data?
int squareOffset = idx * osq; // where in the squares list is this thread's data?
// where after the offset to we start storing the data in the new square list
int rowOffset = 0;
int colOffset = osq;
int symOffset = 2*(osq);
short* my_square = (short*)malloc(sizeof(short) * osq); // add squareOffset to function calls
short* perm = (short*)malloc(sizeof(short)*order);
for(int i = 0; i < osq; i++)
{
my_square[i] = squareList[i + squareOffset];
}
for(int pCount = 0; pCount < totalPerms; pCount++)
{
for(int i = 0; i < order; i++)
{
perm[i] = perms[(pCount*order) + i];
}
permute_cols(perm, my_square, newSquares, order, colOffset, myOffset);
permute_rows(perm, my_square, newSquares, order, rowOffset, myOffset);
permute_symbols(perm, my_square, newSquares, order, symOffset, myOffset);
myOffset += (osq*3);
}
        free(my_square); //!!!! ALWAYS FREE YOUR MEMORY !!!! (malloc'd above, so free(), not delete[])
        free(perm);
}
}
void run_on_gpu(short* squaresToRun, int order, short* newSquares, short* perm,
int squareArraySize, int permArraySize, int newSquareArraySize,
int squaresToCheck, int totalPerms, short *dev_squares, short* dev_perm, short* dev_new_squares)
{
cudaMemcpy(dev_squares, squaresToRun, squareArraySize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_perm, perm, permArraySize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_new_squares, newSquares, newSquareArraySize, cudaMemcpyHostToDevice);
// how many blocks do we need if we use nThreads threads?
int nThreads = 128;
int nBlocks = (squareArraySize + nThreads - 1) / nThreads;
generate_squares<<<nBlocks, nThreads>>>(dev_squares, order, dev_new_squares, dev_perm, squaresToCheck, totalPerms);
cudaMemcpy(newSquares, dev_new_squares, newSquareArraySize, cudaMemcpyDeviceToHost);
//gpuErrchk(cudaPeekAtLastError());
//gpuErrchk(cudaMemcpy(newSquares, dev_new_squares, newSquareArraySize, cudaMemcpyDeviceToHost));
//gpuErrchk(cudaPeekAtLastError());
//gpuErrchk(cudaDeviceSynchronize());
}
void copy_to_vectors(short* newSquares, unordered_set<string> &appendToSquares,
int numberSquares, int order, int totalPerms)
{
int osq = order*order;
long iterRange = numberSquares*totalPerms*3*osq;
for(long i = 0; i < iterRange; i+=osq)
{
        //TODO: create a hash function for matrices; store hashes rather than squares
//TODO: can then compare hash values to see if a square has been generated
//TODO: squares can then be written to a file on a separate thread
//TODO: (or multiple files to be combined at the end).
// heap or stack? should this be created and destroyed dynamically?
        // ans: needs to be dynamic to be able to call normalize/reduce
short* values = new short[osq];
memcpy(values, &newSquares[i], osq*sizeof(short));
LatinSquare sq = LatinSquare(order, values);
//sq.reduce();
sq.normalize();
appendToSquares.insert(sq.flatstring_no_space());
// NOTES! adding only the normalized or reduced squares produces the
// correct results (order 5 = 56 normalized and 1344 reduced)
//delete[] values;
}
}
int main(int argc, char* argv[])
{
// timers
clock_t start, end;
start = clock();
// TODO: make some things globals (e.g. order, osq) to stop passing it around
if (argc < 3)
{
print_usage();
return 0;
}
short order = stoi(string(argv[1]));
short osq = order*order;
string filename_iso = string(argv[2]);
string filename_3 = "3_perm.dat";
string filename_n = to_string(order) + "_perm.dat";
bool cont = true;
if (!file_exists(filename_n))
{
cout << filename_n << " does not exist. Please use the utilites to generate the file." << endl;
cont = false;
}
if(!file_exists(filename_iso))
{
cout << filename_iso << " does not exist." << endl;
cont = false;
}
if (!cont)
return 0;
ifstream isofile; isofile.open(filename_iso);
string line;
// unordered_map<string, LatinSquare> allSqs;
unordered_set<string> allSqs; // TODO: hash function to take this from strings -> doubles
    vector<short*> checkSqs; // squares to permute, do not permute all squares every time
while(getline(isofile, line))
{
LatinSquare isoSq(order, get_array_from_line(line, osq));
// allSqs.insert(make_pair(isoSq.flatstring_no_space(), isoSq));
allSqs.insert(isoSq.flatstring_no_space());
checkSqs.push_back(isoSq.get_values());
}
isofile.close();
long totalPerms = my_factorial(order);
short* perms = (short*)malloc(sizeof(short) * totalPerms * order);
vector<short*> permVec;
ifstream permfile; permfile.open(filename_n);
string permline;
int count = 0;
while(getline(permfile, permline))
{
short* permArr = get_array_from_line(permline, order);
permVec.push_back(permArr);
for(int i = 0; i < order; i++)
{
perms[(count*order) + i] = permArr[i];
}
count++;
}
permfile.close();
start = clock();
// some random value (maybe keep it divisible by nThreads which should be a multiple of 32)
int maxBatchSize = 768;
// 1050ti = 768
// 2080ti = 4352; // number of cores? (sure, although it might eat ram)
long unsigned int numSqs;
int permArraySize = order * sizeof(short) * totalPerms;
int lastSquaresToCheck = 0;
short* dev_squares; short* dev_perm; short* dev_new_squares;
do {
numSqs = allSqs.size();
unordered_set<string> newSqMap;
// TODO: add a permutation batch
// TODO: permute one isotopy class representative at a time -> write to file -> permute next iso rep (this generates all squares in iso class)
/* START: Process each batch of 'maxBatchSize' squares */
int checkedSquares = 0;
int reportCount = 0;
while(checkedSquares < checkSqs.size())
{
if(checkedSquares / (SQ_CHECK_REPORT) > reportCount && checkedSquares > 0)
{
printf("Checked %d out of %ld squares\n", checkedSquares, checkSqs.size());
reportCount++;
}
// only process up to maxBatchSize, in batches, to conserve RAM
int squaresToCheck = (checkSqs.size() - checkedSquares) > maxBatchSize
? maxBatchSize : (checkSqs.size() - checkedSquares);
int squareArraySize = squaresToCheck * osq * sizeof(short);
int newSquareArraySize = squareArraySize * totalPerms * 3;
short* squares = (short*)malloc(squareArraySize);
short* newSquares = (short*)malloc(newSquareArraySize);
/* START: flatten out the checkSqs set */
for(int i = 0; i < squaresToCheck; i++) // each square
{
// start at the last index (ignore first squares that have been checked)
short* values = checkSqs.at(checkedSquares + i);
for(int j = 0; j < osq; j++)
{
squares[(i*osq) + j] = values[j];
}
}
/* END: flatten out the checkSqs set */
if(lastSquaresToCheck != squaresToCheck)
{
if(lastSquaresToCheck > 0)
{
cudaFree(dev_squares);
cudaFree(dev_perm);
cudaFree(dev_new_squares);
}
cudaMalloc((void**)&dev_squares, squareArraySize);
cudaMalloc((void**)&dev_perm, permArraySize);
cudaMalloc((void**)&dev_new_squares, newSquareArraySize);
}
run_on_gpu(squares, order, newSquares, perms, squareArraySize,
permArraySize, newSquareArraySize, squaresToCheck, totalPerms,
dev_squares, dev_perm, dev_new_squares);
// need to store newSqMap here instead so that we can only add
// new unique squares to the checkSqs vector
cout << "BEFORE copy_to_vectors: " << allSqs.size() << " " << checkSqs.size() << " " << newSqMap.size() << endl;
copy_to_vectors(newSquares, newSqMap, squaresToCheck, order, totalPerms);
cout << "AFTER copy_to_vectors: " << allSqs.size() << " " << checkSqs.size() << " " << newSqMap.size() << endl;
checkedSquares += squaresToCheck;
lastSquaresToCheck = squaresToCheck;
delete[] squares;
delete[] newSquares;
}
/* END: Process each batch of 'maxBatchSize' squares */
checkSqs.clear();
pair<unordered_set<string>::iterator, bool> returnValue;
for(auto it = newSqMap.begin(); it != newSqMap.end(); it++)
{
string lsString = (*it);
returnValue = allSqs.insert(lsString);
if(returnValue.second) // the LS was added to the set so add it to the checksqs vector
{
short* values = get_array_from_line(lsString, osq);
checkSqs.push_back(values);
}
}
newSqMap.clear();
cout << "Start Count: " << numSqs << ", End Count: " << allSqs.size() << endl;
} while(numSqs < allSqs.size());
end = clock();
double timeTaken = double(end-start) / double(CLOCKS_PER_SEC);
cout << "CUDA Time Taken: " << timeTaken << " seconds" << endl;
ofstream sqfile; sqfile.open(to_string(order) + "_squares.dat");
for(auto it = allSqs.begin(); it != allSqs.end(); it++)
{
sqfile << (*it) << endl;
}
sqfile.close();
return 0;
}
|
39b0b4327b6cd27a9787d5e0321d64dc1d4a5363.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <hip/driver_types.h>
#include <hip/hip_runtime_api.h>
#include "cuda8803ss.h"
// CUDA must already have been initialized before calling cudaid().
#define CUDASTRLEN 80
static int
id_cuda(int dev,unsigned *mem,unsigned *tmem,int *state){
struct hipDeviceProp_t dprop;
int major,minor,attr,cerr;
void *str = NULL;
hipCtx_t ctx;
hipDevice_t c;
*state = 0;
if((cerr = hipDeviceGet(&c,dev)) != hipSuccess){
fprintf(stderr," Couldn't associative with device (%d)\n",cerr);
return cerr;
}
if((cerr = hipGetDeviceProperties(&dprop,dev)) != hipSuccess){
fprintf(stderr," Couldn't get device properties (%d)\n",cerr);
return cerr;
}
cerr = hipDeviceGetAttribute(&attr,hipDeviceAttributeWarpSize,c);
if(cerr != hipSuccess || attr <= 0){
return cerr;
}
cerr = hipDeviceGetAttribute(&attr,hipDeviceAttributeMultiprocessorCount,c);
if(cerr != hipSuccess || attr <= 0){
return cerr;
}
if((cerr = hipDeviceComputeCapability(&major,&minor,c)) != hipSuccess){
return cerr;
}
if((str = malloc(CUDASTRLEN)) == NULL){
return -1;
}
if((cerr = hipDeviceGetName((char *)str,CUDASTRLEN,c)) != hipSuccess){
goto err;
}
if((cerr = hipCtxCreate(&ctx,HIP_CTX_MAP_HOST|HIP_CTX_SCHED_YIELD,c)) != hipSuccess){
fprintf(stderr," Couldn't create context (%d)\n",cerr);
goto err;
}
if((cerr = cuMemGetInfo(mem,tmem)) != hipSuccess){
hipCtxDetach(ctx);
goto err;
}
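	// Editorial note: hipify left cuMemGetInfo() above unconverted; the HIP
	// runtime equivalent is hipMemGetInfo(), which takes size_t* rather than
	// unsigned*, so mem/tmem (and the MB printfs below) would need adjusting.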
*state = dprop.computeMode;
if(printf("%d.%d %s %s %u/%uMB free %s\n",
major,minor,
dprop.integrated ? "Integrated" : "Standalone",(char *)str,
*mem / (1024 * 1024) + !!(*mem / (1024 * 1024)),
*tmem / (1024 * 1024) + !!(*tmem / (1024 * 1024)),
*state == hipComputeModeExclusive ? "(exclusive)" :
*state == hipComputeModeProhibited ? "(prohibited)" :
*state == hipComputeModeDefault ? "(shared)" :
"(unknown compute mode)") < 0){
cerr = -1;
goto err;
}
free(str);
return hipSuccess;
err: // cerr ought already be set!
free(str);
return cerr;
}
__device__ __constant__ unsigned constptr[1];
__global__ void constkernel(const unsigned *constmax){
__shared__ unsigned psum[BLOCK_SIZE];
unsigned *ptr;
psum[threadIdx.x] = 0;
// Accesses below 64k result in immediate termination, due to use of
// the .global state space (2.0 provides unified addressing, which can
// overcome this). That area's reserved for constant memory (.const
// state space; see 5.1.3 of the PTX 2.0 Reference), from what I see.
for(ptr = constptr ; ptr < constmax ; ptr += BLOCK_SIZE){
++psum[threadIdx.x];
if(ptr[threadIdx.x]){
++psum[threadIdx.x];
}
}
ptr = constptr;
while((uintptr_t)ptr > threadIdx.x * sizeof(unsigned)){
++psum[threadIdx.x];
if(*(ptr - threadIdx.x)){
++psum[threadIdx.x];
}
ptr -= BLOCK_SIZE;
}
}
#define CONSTWIN ((unsigned *)0x10000u)
#define NOMANSPACE (0x1000u)
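// Editorial note: CONSTWIN (0x10000) is the 64 KiB boundary the constant-memory
// walk above stops at, and NOMANSPACE (0x1000, 4 KiB) is where the address-space
// probes below start, presumably to skip the unmapped low pages.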
static int
check_const_ram(const unsigned *max){
dim3 dblock(BLOCK_SIZE,1,1);
dim3 dgrid(1,1,1);
printf(" Verifying %jub constant memory...",(uintmax_t)max);
fflush(stdout);
hipLaunchKernelGGL(( constkernel), dim3(dblock),dim3(dgrid), 0, 0, max);
if(hipCtxSynchronize()){
hipError_t err;
err = hipGetLastError();
fprintf(stderr,"\n Error verifying constant CUDA memory (%s?)\n",
hipGetErrorString(err));
return -1;
}
printf("good.\n");
return 0;
}
#define RANGER "out/cudaranger"
static int
divide_address_space(int devno,uintmax_t off,uintmax_t s,unsigned unit,
unsigned gran,uint32_t *results,
uintmax_t *worked){
char min[40],max[40],dev[20];
char * const argv[] = { RANGER, dev, min, max, NULL };
pid_t pid;
if((size_t)snprintf(dev,sizeof(dev),"%d",devno) >= sizeof(dev)){
fprintf(stderr," Invalid device argument: %d\n",devno);
return -1;
}
while(s){
uintmax_t ts;
int status;
pid_t w;
ts = s > gran ? gran : s;
s -= ts;
if((size_t)snprintf(min,sizeof(min),"0x%jx",off) >= sizeof(min) ||
(size_t)snprintf(max,sizeof(max),"0x%jx",off + ts) >= sizeof(max)){
fprintf(stderr," Invalid arguments: 0x%jx 0x%jx\n",off,off + ts);
return -1;
}
off += ts;
//printf("CALL: %s %s %s\n",dev,min,max);
if((pid = fork()) < 0){
fprintf(stderr," Couldn't fork (%s?)!\n",strerror(errno));
return -1;
}else if(pid == 0){
if(execvp(RANGER,argv)){
fprintf(stderr," Couldn't exec %s (%s?)!\n",RANGER,strerror(errno));
}
exit(CUDARANGER_EXIT_ERROR);
}
while((w = wait(&status)) != pid){
if(w < 0){
fprintf(stderr," Error waiting (%s?)!\n",
strerror(errno));
return -1;
}
}
if(!WIFEXITED(status) || WEXITSTATUS(status) == CUDARANGER_EXIT_ERROR){
fprintf(stderr," Exception running %s %s %s %s\n",
argv[0],argv[1],argv[2],argv[3]);
return -1;
}else if(WEXITSTATUS(status) == CUDARANGER_EXIT_SUCCESS){
*worked += ts;
}else if(WEXITSTATUS(status) != CUDARANGER_EXIT_CUDAFAIL){
fprintf(stderr," Unknown result code %d running"
" %s %s %s %s\n",WEXITSTATUS(status),
argv[0],argv[1],argv[2],argv[3]);
return -1;
} // otherwise, normal failure
}
return 0;
}
static int
cudadump(int devno,uintmax_t tmem,unsigned unit,uintmax_t gran,uint32_t *results){
uintmax_t worked = 0,s;
hipDeviceptr_t ptr;
if(check_const_ram(CONSTWIN)){
return -1;
}
if((s = cuda_alloc_max(stdout,&ptr,unit)) == 0){
return -1;
}
printf(" Allocated %ju of %ju MB (%f%%) at 0x%jx:0x%jx\n",
s / (1024 * 1024) + !!(s % (1024 * 1024)),
tmem / (1024 * 1024) + !!(tmem % (1024 * 1024)),
(float)s / tmem * 100,(uintmax_t)ptr,(uintmax_t)ptr + s);
printf(" Verifying allocated region...\n");
if(dump_cuda(ptr,ptr + (s / gran) * gran,unit,results)){
hipFree(ptr);
fprintf(stderr," Sanity check failed!\n");
return -1;
}
if(hipFree(ptr)){
fprintf(stderr," Error freeing CUDA memory (%s?)\n",
hipGetErrorString(hipGetLastError()));
return -1;
}
printf(" Dumping %jub...\n",tmem);
if(divide_address_space(devno,NOMANSPACE,tmem,unit,gran,results,&worked)){
fprintf(stderr," Error probing CUDA memory!\n");
return -1;
}
printf(" Readable: %jub/%jub (%f%%)\n",worked,tmem,(float)worked / tmem * 100);
worked = 0;
printf(" Dumping address space (%jub)...\n",(uintmax_t)0x100000000ull - NOMANSPACE);
if(divide_address_space(devno,NOMANSPACE,0x100000000ull - NOMANSPACE,unit,gran,results,&worked)){
fprintf(stderr," Error probing CUDA memory!\n");
return -1;
}
printf(" Readable: %jub/%jub (%f%%)\n",worked,0x100000000ull,(float)worked / 0x100000000 * 100);
printf(" Success.\n");
return 0;
}
#define GRAN_DEFAULT 4ul * 1024ul * 1024ul
static void
usage(const char *a0,int status){
fprintf(stderr,"usage: %s [granularity]\n",a0);
fprintf(stderr," default granularity: %lu\n",GRAN_DEFAULT);
exit(status);
}
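// Editorial example (binary name hypothetical): "./cudadump 8388608" would probe
// in 8 MiB chunks instead of the 4 MiB GRAN_DEFAULT; with no argument the default
// granularity is used.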
int main(int argc,char **argv){
unsigned long gran;
unsigned unit = 4; // Minimum alignment of references
int z,count;
if(argc > 2){
usage(argv[0],EXIT_FAILURE);
}else if(argc == 2){
if(getzul(argv[1],&gran)){
usage(argv[0],EXIT_FAILURE);
}
}else{
gran = GRAN_DEFAULT;
}
if(init_cuda_alldevs(&count)){
return EXIT_FAILURE;
}
printf("CUDA device count: %d\n",count);
for(z = 0 ; z < count ; ++z){
uint32_t hostresarr[GRID_SIZE * BLOCK_SIZE];
unsigned mem,tmem;
uint32_t *resarr;
int state;
printf(" %03d ",z);
if(id_cuda(z,&mem,&tmem,&state)){
return EXIT_FAILURE;
}
if(state != hipComputeModeDefault){
printf(" Skipping device (put it in shared mode).\n",z);
continue;
}
if(hipMalloc(&resarr,sizeof(hostresarr)) || hipMemset(resarr,0,sizeof(hostresarr))){
fprintf(stderr," Couldn't allocate result array (%s?)\n",
hipGetErrorString(hipGetLastError()));
return EXIT_FAILURE;
}
if(cudadump(z,tmem,unit,gran,resarr)){
return EXIT_FAILURE;
}
if(hipMemcpy(hostresarr,resarr,sizeof(hostresarr),hipMemcpyDeviceToHost) || hipFree(resarr)){
fprintf(stderr," Couldn't free result array (%s?)\n",
hipGetErrorString(hipGetLastError()));
return EXIT_FAILURE;
}
}
return EXIT_SUCCESS;
}
| 39b0b4327b6cd27a9787d5e0321d64dc1d4a5363.cu | #include <cuda.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <driver_types.h>
#include <cuda_runtime_api.h>
#include "cuda8803ss.h"
// CUDA must already have been initialized before calling cudaid().
#define CUDASTRLEN 80
static int
id_cuda(int dev,unsigned *mem,unsigned *tmem,int *state){
struct cudaDeviceProp dprop;
int major,minor,attr,cerr;
void *str = NULL;
CUcontext ctx;
CUdevice c;
*state = 0;
if((cerr = cuDeviceGet(&c,dev)) != CUDA_SUCCESS){
fprintf(stderr," Couldn't associative with device (%d)\n",cerr);
return cerr;
}
if((cerr = cudaGetDeviceProperties(&dprop,dev)) != CUDA_SUCCESS){
fprintf(stderr," Couldn't get device properties (%d)\n",cerr);
return cerr;
}
cerr = cuDeviceGetAttribute(&attr,CU_DEVICE_ATTRIBUTE_WARP_SIZE,c);
if(cerr != CUDA_SUCCESS || attr <= 0){
return cerr;
}
cerr = cuDeviceGetAttribute(&attr,CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT,c);
if(cerr != CUDA_SUCCESS || attr <= 0){
return cerr;
}
if((cerr = cuDeviceComputeCapability(&major,&minor,c)) != CUDA_SUCCESS){
return cerr;
}
if((str = malloc(CUDASTRLEN)) == NULL){
return -1;
}
if((cerr = cuDeviceGetName((char *)str,CUDASTRLEN,c)) != CUDA_SUCCESS){
goto err;
}
if((cerr = cuCtxCreate(&ctx,CU_CTX_MAP_HOST|CU_CTX_SCHED_YIELD,c)) != CUDA_SUCCESS){
fprintf(stderr," Couldn't create context (%d)\n",cerr);
goto err;
}
if((cerr = cuMemGetInfo(mem,tmem)) != CUDA_SUCCESS){
cuCtxDetach(ctx);
goto err;
}
*state = dprop.computeMode;
if(printf("%d.%d %s %s %u/%uMB free %s\n",
major,minor,
dprop.integrated ? "Integrated" : "Standalone",(char *)str,
*mem / (1024 * 1024) + !!(*mem / (1024 * 1024)),
*tmem / (1024 * 1024) + !!(*tmem / (1024 * 1024)),
*state == CU_COMPUTEMODE_EXCLUSIVE ? "(exclusive)" :
*state == CU_COMPUTEMODE_PROHIBITED ? "(prohibited)" :
*state == CU_COMPUTEMODE_DEFAULT ? "(shared)" :
"(unknown compute mode)") < 0){
cerr = -1;
goto err;
}
free(str);
return CUDA_SUCCESS;
err: // cerr ought already be set!
free(str);
return cerr;
}
__device__ __constant__ unsigned constptr[1];
__global__ void constkernel(const unsigned *constmax){
__shared__ unsigned psum[BLOCK_SIZE];
unsigned *ptr;
psum[threadIdx.x] = 0;
// Accesses below 64k result in immediate termination, due to use of
// the .global state space (2.0 provides unified addressing, which can
// overcome this). That area's reserved for constant memory (.const
// state space; see 5.1.3 of the PTX 2.0 Reference), from what I see.
for(ptr = constptr ; ptr < constmax ; ptr += BLOCK_SIZE){
++psum[threadIdx.x];
if(ptr[threadIdx.x]){
++psum[threadIdx.x];
}
}
ptr = constptr;
while((uintptr_t)ptr > threadIdx.x * sizeof(unsigned)){
++psum[threadIdx.x];
if(*(ptr - threadIdx.x)){
++psum[threadIdx.x];
}
ptr -= BLOCK_SIZE;
}
}
#define CONSTWIN ((unsigned *)0x10000u)
#define NOMANSPACE (0x1000u)
static int
check_const_ram(const unsigned *max){
dim3 dblock(BLOCK_SIZE,1,1);
dim3 dgrid(1,1,1);
printf(" Verifying %jub constant memory...",(uintmax_t)max);
fflush(stdout);
constkernel<<<dblock,dgrid>>>(max);
if(cuCtxSynchronize()){
cudaError_t err;
err = cudaGetLastError();
fprintf(stderr,"\n Error verifying constant CUDA memory (%s?)\n",
cudaGetErrorString(err));
return -1;
}
printf("good.\n");
return 0;
}
#define RANGER "out/cudaranger"
static int
divide_address_space(int devno,uintmax_t off,uintmax_t s,unsigned unit,
unsigned gran,uint32_t *results,
uintmax_t *worked){
char min[40],max[40],dev[20];
char * const argv[] = { RANGER, dev, min, max, NULL };
pid_t pid;
if((size_t)snprintf(dev,sizeof(dev),"%d",devno) >= sizeof(dev)){
fprintf(stderr," Invalid device argument: %d\n",devno);
return -1;
}
while(s){
uintmax_t ts;
int status;
pid_t w;
ts = s > gran ? gran : s;
s -= ts;
if((size_t)snprintf(min,sizeof(min),"0x%jx",off) >= sizeof(min) ||
(size_t)snprintf(max,sizeof(max),"0x%jx",off + ts) >= sizeof(max)){
fprintf(stderr," Invalid arguments: 0x%jx 0x%jx\n",off,off + ts);
return -1;
}
off += ts;
//printf("CALL: %s %s %s\n",dev,min,max);
if((pid = fork()) < 0){
fprintf(stderr," Couldn't fork (%s?)!\n",strerror(errno));
return -1;
}else if(pid == 0){
if(execvp(RANGER,argv)){
fprintf(stderr," Couldn't exec %s (%s?)!\n",RANGER,strerror(errno));
}
exit(CUDARANGER_EXIT_ERROR);
}
while((w = wait(&status)) != pid){
if(w < 0){
fprintf(stderr," Error waiting (%s?)!\n",
strerror(errno));
return -1;
}
}
if(!WIFEXITED(status) || WEXITSTATUS(status) == CUDARANGER_EXIT_ERROR){
fprintf(stderr," Exception running %s %s %s %s\n",
argv[0],argv[1],argv[2],argv[3]);
return -1;
}else if(WEXITSTATUS(status) == CUDARANGER_EXIT_SUCCESS){
*worked += ts;
}else if(WEXITSTATUS(status) != CUDARANGER_EXIT_CUDAFAIL){
fprintf(stderr," Unknown result code %d running"
" %s %s %s %s\n",WEXITSTATUS(status),
argv[0],argv[1],argv[2],argv[3]);
return -1;
} // otherwise, normal failure
}
return 0;
}
static int
cudadump(int devno,uintmax_t tmem,unsigned unit,uintmax_t gran,uint32_t *results){
uintmax_t worked = 0,s;
CUdeviceptr ptr;
if(check_const_ram(CONSTWIN)){
return -1;
}
if((s = cuda_alloc_max(stdout,&ptr,unit)) == 0){
return -1;
}
printf(" Allocated %ju of %ju MB (%f%%) at 0x%jx:0x%jx\n",
s / (1024 * 1024) + !!(s % (1024 * 1024)),
tmem / (1024 * 1024) + !!(tmem % (1024 * 1024)),
(float)s / tmem * 100,(uintmax_t)ptr,(uintmax_t)ptr + s);
printf(" Verifying allocated region...\n");
if(dump_cuda(ptr,ptr + (s / gran) * gran,unit,results)){
cuMemFree(ptr);
fprintf(stderr," Sanity check failed!\n");
return -1;
}
if(cuMemFree(ptr)){
fprintf(stderr," Error freeing CUDA memory (%s?)\n",
cudaGetErrorString(cudaGetLastError()));
return -1;
}
printf(" Dumping %jub...\n",tmem);
if(divide_address_space(devno,NOMANSPACE,tmem,unit,gran,results,&worked)){
fprintf(stderr," Error probing CUDA memory!\n");
return -1;
}
printf(" Readable: %jub/%jub (%f%%)\n",worked,tmem,(float)worked / tmem * 100);
worked = 0;
printf(" Dumping address space (%jub)...\n",(uintmax_t)0x100000000ull - NOMANSPACE);
if(divide_address_space(devno,NOMANSPACE,0x100000000ull - NOMANSPACE,unit,gran,results,&worked)){
fprintf(stderr," Error probing CUDA memory!\n");
return -1;
}
printf(" Readable: %jub/%jub (%f%%)\n",worked,0x100000000ull,(float)worked / 0x100000000 * 100);
printf(" Success.\n");
return 0;
}
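// Default probe granularity: the maximum number of bytes handed to each child
// process.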
#define GRAN_DEFAULT (4ul * 1024ul * 1024ul)
static void
usage(const char *a0,int status){
fprintf(stderr,"usage: %s [granularity]\n",a0);
fprintf(stderr," default granularity: %lu\n",GRAN_DEFAULT);
exit(status);
}
int main(int argc,char **argv){
unsigned long gran;
unsigned unit = 4; // Minimum alignment of references
int z,count;
if(argc > 2){
usage(argv[0],EXIT_FAILURE);
}else if(argc == 2){
if(getzul(argv[1],&gran)){
usage(argv[0],EXIT_FAILURE);
}
}else{
gran = GRAN_DEFAULT;
}
if(init_cuda_alldevs(&count)){
return EXIT_FAILURE;
}
printf("CUDA device count: %d\n",count);
for(z = 0 ; z < count ; ++z){
uint32_t hostresarr[GRID_SIZE * BLOCK_SIZE];
unsigned mem,tmem;
uint32_t *resarr;
int state;
printf(" %03d ",z);
if(id_cuda(z,&mem,&tmem,&state)){
return EXIT_FAILURE;
}
if(state != CU_COMPUTEMODE_DEFAULT){
printf(" Skipping device (put it in shared mode).\n",z);
continue;
}
if(cudaMalloc(&resarr,sizeof(hostresarr)) || cudaMemset(resarr,0,sizeof(hostresarr))){
fprintf(stderr," Couldn't allocate result array (%s?)\n",
cudaGetErrorString(cudaGetLastError()));
return EXIT_FAILURE;
}
if(cudadump(z,tmem,unit,gran,resarr)){
return EXIT_FAILURE;
}
if(cudaMemcpy(hostresarr,resarr,sizeof(hostresarr),cudaMemcpyDeviceToHost) || cudaFree(resarr)){
fprintf(stderr," Couldn't free result array (%s?)\n",
cudaGetErrorString(cudaGetLastError()));
return EXIT_FAILURE;
}
}
return EXIT_SUCCESS;
}
|
adf9f172ca8aecedc00b8118f2846d322dc9fe85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "psc_cuda2.h"
#define BLOCKSIZE_X 1
#define BLOCKSIZE_Y 16
#define BLOCKSIZE_Z 16
#include "psc_fields_cuda2.h"
// ----------------------------------------------------------------------
// FIXME
#include "cuda_wrap.h"
#define BND (2)
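// The two macros below linearize a (field, jy, jz) triple for a patch that is
// a single cell wide in x and carries a BND=2 ghost border in y and z.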
#define X3_DEV_OFF_YZ(fldnr, jy, jz) \
((((fldnr)*mz + ((jz) + 2)) * my + ((jy) + 2)) * 1 + (0))
#undef F3_DEV
#define F3_DEV(fldnr, ix, jy, jz) (d_flds)[X3_DEV_OFF_YZ(fldnr, jy, jz)]
// FIXME end
// ----------------------------------------------------------------------
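// push_fields_E_yz: advance E by half a time step from the curl of H and the
// current density J (yz plane only).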
__global__ static void push_fields_E_yz(real* d_flds0, real dt, real cny,
real cnz, int my, int mz,
unsigned int size, int gridy)
{
int bidx_y = blockIdx.y % gridy;
int p = blockIdx.y / gridy;
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = bidx_y * blockDim.y + threadIdx.y;
if (!(iy < my - 2 * (2 - BND) && iz < mz - 2 * (2 - BND)))
return;
iy -= BND;
iz -= BND;
real* d_flds = d_flds0 + p * size;
F3_DEV(EX, 0, iy, iz) +=
cny * (F3_DEV(HZ, 0, iy, iz) - F3_DEV(HZ, 0, iy - 1, iz)) -
cnz * (F3_DEV(HY, 0, iy, iz) - F3_DEV(HY, 0, iy, iz - 1)) -
.5f * dt * F3_DEV(JXI, 0, iy, iz);
F3_DEV(EY, 0, iy, iz) +=
cnz * (F3_DEV(HX, 0, iy, iz) - F3_DEV(HX, 0, iy, iz - 1)) - 0.f -
.5f * dt * F3_DEV(JYI, 0, iy, iz);
F3_DEV(EZ, 0, iy, iz) +=
0.f - cny * (F3_DEV(HX, 0, iy, iz) - F3_DEV(HX, 0, iy - 1, iz)) -
.5f * dt * F3_DEV(JZI, 0, iy, iz);
}
__global__ static void push_fields_H_yz(real* d_flds0, real cny, real cnz,
int my, int mz, unsigned int size,
int gridy)
{
int bidx_y = blockIdx.y % gridy;
int p = blockIdx.y / gridy;
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = bidx_y * blockDim.y + threadIdx.y;
if (!(iy < my - 2 * (2 - BND) && iz < mz - 2 * (2 - BND)))
return;
iy -= BND;
iz -= BND;
real* d_flds = d_flds0 + p * size;
F3_DEV(HX, 0, iy, iz) -=
cny * (F3_DEV(EZ, 0, iy + 1, iz) - F3_DEV(EZ, 0, iy, iz)) -
cnz * (F3_DEV(EY, 0, iy, iz + 1) - F3_DEV(EY, 0, iy, iz));
F3_DEV(HY, 0, iy, iz) -=
cnz * (F3_DEV(EX, 0, iy, iz + 1) - F3_DEV(EX, 0, iy, iz)) - 0.f;
F3_DEV(HZ, 0, iy, iz) -=
0.f - cny * (F3_DEV(EX, 0, iy + 1, iz) - F3_DEV(EX, 0, iy, iz));
}
// ----------------------------------------------------------------------
// cuda2_push_mflds_E_yz
void cuda2_push_mflds_E_yz(struct psc_mfields* mflds)
{
struct psc_mfields_cuda2* sub = psc_mfields_cuda2(mflds);
if (mflds->nr_patches == 0) {
return;
}
struct psc_patch* patch = &ppsc->patch[0];
real dt = ppsc->dt;
real cny = .5f * ppsc->dt / patch->dx[1];
real cnz = .5f * ppsc->dt / patch->dx[2];
assert(patch->ldims[0] == 1);
unsigned int size = mflds->nr_fields * sub->im[0] * sub->im[1] * sub->im[2];
int my = sub->im[1];
int mz = sub->im[2];
int dimBlock[2] = {BLOCKSIZE_Y, BLOCKSIZE_Z};
int grid[2] = {(patch->ldims[1] + 2 * BND + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + 2 * BND + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z};
int dimGrid[2] = {grid[0], grid[1] * mflds->nr_patches};
RUN_KERNEL(dimGrid, dimBlock, push_fields_E_yz,
(sub->d_flds, dt, cny, cnz, my, mz, size, grid[1]));
}
// ----------------------------------------------------------------------
// cuda2_push_mflds_H_yz
void cuda2_push_mflds_H_yz(struct psc_mfields* mflds)
{
struct psc_mfields_cuda2* sub = psc_mfields_cuda2(mflds);
if (mflds->nr_patches == 0) {
return;
}
struct psc_patch* patch = &ppsc->patch[0];
real cny = .5f * ppsc->dt / patch->dx[1];
real cnz = .5f * ppsc->dt / patch->dx[2];
assert(patch->ldims[0] == 1);
unsigned int size = mflds->nr_fields * sub->im[0] * sub->im[1] * sub->im[2];
int my = sub->im[1];
int mz = sub->im[2];
int dimBlock[2] = {BLOCKSIZE_Y, BLOCKSIZE_Z};
int grid[2] = {(patch->ldims[1] + 2 * BND + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + 2 * BND + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z};
int dimGrid[2] = {grid[0], grid[1] * mflds->nr_patches};
RUN_KERNEL(dimGrid, dimBlock, push_fields_H_yz,
(sub->d_flds, cny, cnz, my, mz, size, grid[1]));
}
| adf9f172ca8aecedc00b8118f2846d322dc9fe85.cu |
#include "psc_cuda2.h"
#define BLOCKSIZE_X 1
#define BLOCKSIZE_Y 16
#define BLOCKSIZE_Z 16
#include "psc_fields_cuda2.h"
// ----------------------------------------------------------------------
// FIXME
#include "cuda_wrap.h"
#define BND (2)
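// The two macros below linearize a (field, jy, jz) triple for a patch that is
// a single cell wide in x and carries a BND=2 ghost border in y and z.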
#define X3_DEV_OFF_YZ(fldnr, jy, jz) \
((((fldnr)*mz + ((jz) + 2)) * my + ((jy) + 2)) * 1 + (0))
#undef F3_DEV
#define F3_DEV(fldnr, ix, jy, jz) (d_flds)[X3_DEV_OFF_YZ(fldnr, jy, jz)]
// FIXME end
// ----------------------------------------------------------------------
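// push_fields_E_yz: advance E by half a time step from the curl of H and the
// current density J (yz plane only).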
__global__ static void push_fields_E_yz(real* d_flds0, real dt, real cny,
real cnz, int my, int mz,
unsigned int size, int gridy)
{
int bidx_y = blockIdx.y % gridy;
int p = blockIdx.y / gridy;
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = bidx_y * blockDim.y + threadIdx.y;
if (!(iy < my - 2 * (2 - BND) && iz < mz - 2 * (2 - BND)))
return;
iy -= BND;
iz -= BND;
real* d_flds = d_flds0 + p * size;
F3_DEV(EX, 0, iy, iz) +=
cny * (F3_DEV(HZ, 0, iy, iz) - F3_DEV(HZ, 0, iy - 1, iz)) -
cnz * (F3_DEV(HY, 0, iy, iz) - F3_DEV(HY, 0, iy, iz - 1)) -
.5f * dt * F3_DEV(JXI, 0, iy, iz);
F3_DEV(EY, 0, iy, iz) +=
cnz * (F3_DEV(HX, 0, iy, iz) - F3_DEV(HX, 0, iy, iz - 1)) - 0.f -
.5f * dt * F3_DEV(JYI, 0, iy, iz);
F3_DEV(EZ, 0, iy, iz) +=
0.f - cny * (F3_DEV(HX, 0, iy, iz) - F3_DEV(HX, 0, iy - 1, iz)) -
.5f * dt * F3_DEV(JZI, 0, iy, iz);
}
__global__ static void push_fields_H_yz(real* d_flds0, real cny, real cnz,
int my, int mz, unsigned int size,
int gridy)
{
int bidx_y = blockIdx.y % gridy;
int p = blockIdx.y / gridy;
int iy = blockIdx.x * blockDim.x + threadIdx.x;
int iz = bidx_y * blockDim.y + threadIdx.y;
if (!(iy < my - 2 * (2 - BND) && iz < mz - 2 * (2 - BND)))
return;
iy -= BND;
iz -= BND;
real* d_flds = d_flds0 + p * size;
F3_DEV(HX, 0, iy, iz) -=
cny * (F3_DEV(EZ, 0, iy + 1, iz) - F3_DEV(EZ, 0, iy, iz)) -
cnz * (F3_DEV(EY, 0, iy, iz + 1) - F3_DEV(EY, 0, iy, iz));
F3_DEV(HY, 0, iy, iz) -=
cnz * (F3_DEV(EX, 0, iy, iz + 1) - F3_DEV(EX, 0, iy, iz)) - 0.f;
F3_DEV(HZ, 0, iy, iz) -=
0.f - cny * (F3_DEV(EX, 0, iy + 1, iz) - F3_DEV(EX, 0, iy, iz));
}
// ----------------------------------------------------------------------
// cuda2_push_mflds_E_yz
void cuda2_push_mflds_E_yz(struct psc_mfields* mflds)
{
struct psc_mfields_cuda2* sub = psc_mfields_cuda2(mflds);
if (mflds->nr_patches == 0) {
return;
}
struct psc_patch* patch = &ppsc->patch[0];
real dt = ppsc->dt;
real cny = .5f * ppsc->dt / patch->dx[1];
real cnz = .5f * ppsc->dt / patch->dx[2];
assert(patch->ldims[0] == 1);
unsigned int size = mflds->nr_fields * sub->im[0] * sub->im[1] * sub->im[2];
int my = sub->im[1];
int mz = sub->im[2];
int dimBlock[2] = {BLOCKSIZE_Y, BLOCKSIZE_Z};
int grid[2] = {(patch->ldims[1] + 2 * BND + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + 2 * BND + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z};
int dimGrid[2] = {grid[0], grid[1] * mflds->nr_patches};
RUN_KERNEL(dimGrid, dimBlock, push_fields_E_yz,
(sub->d_flds, dt, cny, cnz, my, mz, size, grid[1]));
}
// ----------------------------------------------------------------------
// cuda2_push_mflds_H_yz
void cuda2_push_mflds_H_yz(struct psc_mfields* mflds)
{
struct psc_mfields_cuda2* sub = psc_mfields_cuda2(mflds);
if (mflds->nr_patches == 0) {
return;
}
struct psc_patch* patch = &ppsc->patch[0];
real cny = .5f * ppsc->dt / patch->dx[1];
real cnz = .5f * ppsc->dt / patch->dx[2];
assert(patch->ldims[0] == 1);
unsigned int size = mflds->nr_fields * sub->im[0] * sub->im[1] * sub->im[2];
int my = sub->im[1];
int mz = sub->im[2];
int dimBlock[2] = {BLOCKSIZE_Y, BLOCKSIZE_Z};
int grid[2] = {(patch->ldims[1] + 2 * BND + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
(patch->ldims[2] + 2 * BND + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z};
int dimGrid[2] = {grid[0], grid[1] * mflds->nr_patches};
RUN_KERNEL(dimGrid, dimBlock, push_fields_H_yz,
(sub->d_flds, cny, cnz, my, mz, size, grid[1]));
}
|
5b241dabe194cf02f57d8a6aad11728a42debb98.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
void Shape::map(void)
{
// TODO: use cudnn reduce tensor
checkCUDA(hipMalloc(&outputs[0].data_ptr, outputs[0].volume() * sizeof(DATATYPE)));
}
void Shape::unmap(void)
{
checkCUDA(hipFree(outputs[0].data_ptr));
}
void Shape::forward(bool block)
{
if (block)
checkCUDA(hipDeviceSynchronize());
}
void Model::measure_shape_cost(Shape* shape)
{
// TODO: use cudnn reduce tensor
shape->runtime = 0;
}
| 5b241dabe194cf02f57d8a6aad11728a42debb98.cu | /* Copyright 2019 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "taso/ops.h"
#include "taso/cuda_helper.h"
using namespace taso;
void Shape::map(void)
{
// TODO: use cudnn reduce tensor
checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputs[0].volume() * sizeof(DATATYPE)));
}
void Shape::unmap(void)
{
checkCUDA(cudaFree(outputs[0].data_ptr));
}
void Shape::forward(bool block)
{
if (block)
checkCUDA(cudaDeviceSynchronize());
}
void Model::measure_shape_cost(Shape* shape)
{
// TODO: use cudnn reduce tensor
shape->runtime = 0;
}
|
2650c5e89400d77289dde10ade4cd041265664f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHTensorCopy.h"
#include "THHApply.cuh"
#include "THHNumerics.cuh"
#include "THHTensorMath.cuh"
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/transform.h>
#if TORCH_HIP_VERSION >= 7000
#include <thrust/system/hip/execution_policy.h>
#endif
#include <cfloat>
template <typename T>
struct TensorFillOp {
TensorFillOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* v) { *v = val; }
const T val;
};
// copypasta from https://github.com/thrust/thrust/blob/master/examples/strided_range.cu
template <typename Iterator>
class strided_range
{
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor : public thrust::unary_function<difference_type,
difference_type>
{
difference_type stride;
stride_functor(difference_type stride)
: stride(stride) {}
__host__ __device__
difference_type operator()(const difference_type& i) const
{
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride)
: first(first), last(last), stride(stride) {}
iterator begin(void) const
{
return PermutationIterator(first,
TransformIterator(CountingIterator(0),
stride_functor(stride)));
}
iterator end(void) const
{
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
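// idx_functor maps a flat element index to a coordinate along one dimension:
// (val / div) % size picks out that dimension's component, and the + 1 makes
// the result 1-based.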
struct idx_functor
{
long div;
long size;
__host__ __device__
idx_functor(long div, long size) : div(div), size(size) {}
__host__ __device__
long operator()(long val) {
return (val / div) % size + 1;
}
};
template <typename T>
struct NonZeroOp
{
NonZeroOp() {}
__host__ __device__ bool operator()(T lhs) const {
if (THCNumerics<T>::ne(lhs, ScalarConvert<float, T>::to(0.0))) {
return true;
} else {
return false;
}
}
};
#include "generic/THCTensorMath.cu"
#include "THHGenerateAllTypes.h"
| 2650c5e89400d77289dde10ade4cd041265664f6.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCTensorCopy.h"
#include "THCApply.cuh"
#include "THCNumerics.cuh"
#include "THCTensorMath.cuh"
#include <thrust/copy.h>
#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/transform.h>
#if CUDA_VERSION >= 7000
#include <thrust/system/cuda/execution_policy.h>
#endif
#include <cfloat>
template <typename T>
struct TensorFillOp {
TensorFillOp(T v) : val(v) {}
__device__ __forceinline__ void operator()(T* v) { *v = val; }
const T val;
};
// copypasta from https://github.com/thrust/thrust/blob/master/examples/strided_range.cu
template <typename Iterator>
class strided_range
{
public:
typedef typename thrust::iterator_difference<Iterator>::type difference_type;
struct stride_functor : public thrust::unary_function<difference_type,
difference_type>
{
difference_type stride;
stride_functor(difference_type stride)
: stride(stride) {}
__host__ __device__
difference_type operator()(const difference_type& i) const
{
return stride * i;
}
};
typedef typename thrust::counting_iterator<difference_type> CountingIterator;
typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator;
typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator;
// type of the strided_range iterator
typedef PermutationIterator iterator;
// construct strided_range for the range [first,last)
strided_range(Iterator first, Iterator last, difference_type stride)
: first(first), last(last), stride(stride) {}
iterator begin(void) const
{
return PermutationIterator(first,
TransformIterator(CountingIterator(0),
stride_functor(stride)));
}
iterator end(void) const
{
return begin() + ((last - first) + (stride - 1)) / stride;
}
protected:
Iterator first;
Iterator last;
difference_type stride;
};
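// idx_functor maps a flat element index to a coordinate along one dimension:
// (val / div) % size picks out that dimension's component, and the + 1 makes
// the result 1-based.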
struct idx_functor
{
long div;
long size;
__host__ __device__
idx_functor(long div, long size) : div(div), size(size) {}
__host__ __device__
long operator()(long val) {
return (val / div) % size + 1;
}
};
template <typename T>
struct NonZeroOp
{
NonZeroOp() {}
__host__ __device__ bool operator()(T lhs) const {
if (THCNumerics<T>::ne(lhs, ScalarConvert<float, T>::to(0.0))) {
return true;
} else {
return false;
}
}
};
#include "generic/THCTensorMath.cu"
#include "THCGenerateAllTypes.h"
|
d4c0b28c349e8033c1331396628035489d39b24c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cassert>
#include <chrono>
#include <ftk/numeric/inverse_linear_interpolation_solver.hh>
#include <ftk/numeric/linear_interpolation.hh>
#include <ftk/numeric/clamp.hh>
#include <ftk/numeric/symmetric_matrix.hh>
#include <ftk/numeric/fixed_point.hh>
#include <ftk/numeric/critical_point_type.hh>
#include <ftk/numeric/critical_point_test.hh>
#include <ftk/mesh/lattice.hh>
// #include <ftk/filters/critical_point_lite.hh>
#include "common_hip.cuh"
using namespace ftk;
template <int scope, typename F>
__device__ __host__
bool check_simplex_3dclt(
int current_timestep,
const lattice4_t& domain,
const lattice4_t& core,
const lattice3_t& ext, // array dimension
const element42_t& e,
const int nc,
const F *uv[2],
cp_t &p)
{
// typedef ftk::fixed_point<> fp_t;
if (e.corner[3] != current_timestep)
return false;
int vertices[3][4], indices[3];
size_t local_indices[3];
for (int i = 0; i < 3; i ++) {
for (int j = 0; j < 4; j ++) {
vertices[i][j] = e.corner[j]
+ unit_simplex_offset_4_2<scope>(e.type, i, j);
if (vertices[i][j] < domain.st[j] ||
vertices[i][j] > domain.st[j] + domain.sz[j] - 1)
return false;
}
indices[i] = domain.to_index(vertices[i]);
local_indices[i] = ext.to_index(vertices[i]);
}
F X[3][4], UV[3][2];
// fp_t UVf[3][2];
const F factor = 32768;
int64_t UVf[3][2];
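// Gather vertex coordinates and field values; UV is also scaled by 2^15 into
// 64-bit integers for the fixed-point (robust) simplex test below.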
for (int i = 0; i < 3; i ++) {
const size_t k = local_indices[i]; // k = ext.to_index(vertices[i]);
const size_t t = unit_simplex_offset_4_2<scope>(e.type, i, 3);
UV[i][0] = uv[t][k*nc];
UV[i][1] = uv[t][k*nc+1];
UVf[i][0] = UV[i][0] * factor;
UVf[i][1] = UV[i][1] * factor;
for (int j = 0; j < 3; j ++)
X[i][j] = vertices[i][j];
X[i][3] = vertices[i][3];
}
bool succ = robust_critical_point_in_simplex2(UVf, indices);
if (succ) {
// locate zero
F mu[3], // barycentric coordinates
cond; // condition number
inverse_lerp_s2v2(UV, mu, &cond);
ftk::clamp_barycentric<3>(mu);
// interpolation
F x[4];
lerp_s2v4(X, mu, x);
// result
p.x[0] = x[0];
p.x[1] = x[1];
p.x[2] = x[2];
p.t = x[3];
// p.cond = cond;
// printf("%f, %f, %f, %f\n", x[0], x[1], x[2], x[3]);
return true;
} else
return false;
}
template <int scope, typename F>
__global__
void sweep_simplices(
int current_timestep,
const lattice4_t domain,
const lattice4_t core,
const lattice3_t ext, // array dimension
const int nc,
const F *uv_c, // current timestep
const F *uv_n, // next timestep
unsigned long long &ncps, cp_t *cps)
{
const F *uv[2] = {uv_c, uv_n};
int tid = getGlobalIdx_3D_1D();
const element42_t e = element42_from_index<scope>(core, tid);
cp_t cp;
bool succ = check_simplex_3dclt<scope>(
current_timestep,
domain, core, ext, e, nc, uv, cp);
if (succ) {
unsigned long long i = atomicAdd(&ncps, 1ul);
cp.tag = tid;
cps[i] = cp;
}
}
template <int scope, typename F>
static std::vector<cp_t> extract_3dclt(
int current_timestep,
const lattice4_t& domain,
const lattice4_t& core,
const lattice3_t& ext,
const int nc,
const float *uv_c,
const float *uv_n)
{
auto t0 = std::chrono::high_resolution_clock::now();
const size_t ntasks = core.n() * ntypes_4_2<scope>();
// fprintf(stderr, "ntasks=%zu\n", ntasks);
const int maxGridDim = 1024;
const int blockSize = 256;
const int nBlocks = idivup(ntasks, blockSize);
dim3 gridSize;
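// Fold the 1-D block count into a 2-D grid when it exceeds maxGridDim.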
if (nBlocks >= maxGridDim)
gridSize = dim3(idivup(nBlocks, maxGridDim), maxGridDim);
else
gridSize = dim3(nBlocks);
F *duv_c = NULL, *duv_n = NULL;
if (uv_c) {
hipMalloc((void**)&duv_c, sizeof(float) * ext.n() * nc);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating uv_c");
hipMemcpy(duv_c, uv_c, sizeof(float) * ext.n() * nc, hipMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying uv_c");
}
if (uv_n) {
hipMalloc((void**)&duv_n, sizeof(float) * ext.n() * nc);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating uv_l");
hipMemcpy(duv_n, uv_n, sizeof(float) * ext.n() * nc, hipMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying uv_l");
}
unsigned long long *dncps; // number of cps
hipMalloc((void**)&dncps, sizeof(unsigned long long));
hipMemset(dncps, 0, sizeof(unsigned long long));
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dncps");
cp_t *dcps;
hipMalloc((void**)&dcps, 1024*1024*256); // sizeof(cp_t) * core.n());
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dcps");
hipDeviceSynchronize();
fprintf(stderr, "calling kernel func...\n");
hipLaunchKernelGGL(( sweep_simplices<scope>), dim3(gridSize), dim3(blockSize), 0, 0,
current_timestep,
domain, core, ext, nc, duv_c, duv_n,
*dncps, dcps);
hipDeviceSynchronize();
checkLastCudaError("[FTK-CUDA] error: sweep_simplices, kernel function");
unsigned long long ncps = 0;
hipMemcpy(&ncps, dncps, sizeof(unsigned long long), hipMemcpyDeviceToHost);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipMemcpyDeviceToHost, dncps");
fprintf(stderr, "ncps=%llu\n", ncps);
std::vector<cp_t> cps(ncps);
hipMemcpy(cps.data(), dcps, sizeof(cp_t) * ncps, hipMemcpyDeviceToHost);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipMemcpyDeviceToHost");
if (duv_c) hipFree(duv_c);
if (duv_n) hipFree(duv_n);
hipFree(dncps);
hipFree(dcps);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipFree");
hipDeviceSynchronize();
auto t1 = std::chrono::high_resolution_clock::now();
float duration = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() * 1e-9;
fprintf(stderr, "exitting gpu kernel, ncps=%llu, time=%f\n", ncps, duration);
return cps;
}
std::vector<cp_t>
extract_3dclt_cuda(
int scope,
int current_timestep,
const ftk::lattice& domain,
const ftk::lattice& core,
const ftk::lattice& ext,
const int nc,
const float *uv_c,
const float *uv_n)
{
lattice4_t D(domain);
lattice4_t C(core);
lattice3_t E(ext);
if (scope == scope_interval)
return extract_3dclt<scope_interval, float>(current_timestep, D, C, E, nc, uv_c, uv_n);
else
return extract_3dclt<scope_ordinal, float>(current_timestep, D, C, E, nc, uv_c, uv_n);
}
| d4c0b28c349e8033c1331396628035489d39b24c.cu | #include <cstdio>
#include <cassert>
#include <chrono>
#include <ftk/numeric/inverse_linear_interpolation_solver.hh>
#include <ftk/numeric/linear_interpolation.hh>
#include <ftk/numeric/clamp.hh>
#include <ftk/numeric/symmetric_matrix.hh>
#include <ftk/numeric/fixed_point.hh>
#include <ftk/numeric/critical_point_type.hh>
#include <ftk/numeric/critical_point_test.hh>
#include <ftk/mesh/lattice.hh>
// #include <ftk/filters/critical_point_lite.hh>
#include "common.cuh"
using namespace ftk;
template <int scope, typename F>
__device__ __host__
bool check_simplex_3dclt(
int current_timestep,
const lattice4_t& domain,
const lattice4_t& core,
const lattice3_t& ext, // array dimension
const element42_t& e,
const int nc,
const F *uv[2],
cp_t &p)
{
// typedef ftk::fixed_point<> fp_t;
if (e.corner[3] != current_timestep)
return false;
int vertices[3][4], indices[3];
size_t local_indices[3];
for (int i = 0; i < 3; i ++) {
for (int j = 0; j < 4; j ++) {
vertices[i][j] = e.corner[j]
+ unit_simplex_offset_4_2<scope>(e.type, i, j);
if (vertices[i][j] < domain.st[j] ||
vertices[i][j] > domain.st[j] + domain.sz[j] - 1)
return false;
}
indices[i] = domain.to_index(vertices[i]);
local_indices[i] = ext.to_index(vertices[i]);
}
F X[3][4], UV[3][2];
// fp_t UVf[3][2];
const F factor = 32768;
int64_t UVf[3][2];
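// Gather vertex coordinates and field values; UV is also scaled by 2^15 into
// 64-bit integers for the fixed-point (robust) simplex test below.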
for (int i = 0; i < 3; i ++) {
const size_t k = local_indices[i]; // k = ext.to_index(vertices[i]);
const size_t t = unit_simplex_offset_4_2<scope>(e.type, i, 3);
UV[i][0] = uv[t][k*nc];
UV[i][1] = uv[t][k*nc+1];
UVf[i][0] = UV[i][0] * factor;
UVf[i][1] = UV[i][1] * factor;
for (int j = 0; j < 3; j ++)
X[i][j] = vertices[i][j];
X[i][3] = vertices[i][3];
}
bool succ = robust_critical_point_in_simplex2(UVf, indices);
if (succ) {
// locate zero
F mu[3], // barycentric coordinates
cond; // condition number
inverse_lerp_s2v2(UV, mu, &cond);
ftk::clamp_barycentric<3>(mu);
// interpolation
F x[4];
lerp_s2v4(X, mu, x);
// result
p.x[0] = x[0];
p.x[1] = x[1];
p.x[2] = x[2];
p.t = x[3];
// p.cond = cond;
// printf("%f, %f, %f, %f\n", x[0], x[1], x[2], x[3]);
return true;
} else
return false;
}
template <int scope, typename F>
__global__
void sweep_simplices(
int current_timestep,
const lattice4_t domain,
const lattice4_t core,
const lattice3_t ext, // array dimension
const int nc,
const F *uv_c, // current timestep
const F *uv_n, // next timestep
unsigned long long &ncps, cp_t *cps)
{
const F *uv[2] = {uv_c, uv_n};
int tid = getGlobalIdx_3D_1D();
const element42_t e = element42_from_index<scope>(core, tid);
cp_t cp;
bool succ = check_simplex_3dclt<scope>(
current_timestep,
domain, core, ext, e, nc, uv, cp);
if (succ) {
unsigned long long i = atomicAdd(&ncps, 1ul);
cp.tag = tid;
cps[i] = cp;
}
}
template <int scope, typename F>
static std::vector<cp_t> extract_3dclt(
int current_timestep,
const lattice4_t& domain,
const lattice4_t& core,
const lattice3_t& ext,
const int nc,
const float *uv_c,
const float *uv_n)
{
auto t0 = std::chrono::high_resolution_clock::now();
const size_t ntasks = core.n() * ntypes_4_2<scope>();
// fprintf(stderr, "ntasks=%zu\n", ntasks);
const int maxGridDim = 1024;
const int blockSize = 256;
const int nBlocks = idivup(ntasks, blockSize);
dim3 gridSize;
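// Fold the 1-D block count into a 2-D grid when it exceeds maxGridDim.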
if (nBlocks >= maxGridDim)
gridSize = dim3(idivup(nBlocks, maxGridDim), maxGridDim);
else
gridSize = dim3(nBlocks);
F *duv_c = NULL, *duv_n = NULL;
if (uv_c) {
cudaMalloc((void**)&duv_c, sizeof(float) * ext.n() * nc);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating uv_c");
cudaMemcpy(duv_c, uv_c, sizeof(float) * ext.n() * nc, cudaMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying uv_c");
}
if (uv_n) {
cudaMalloc((void**)&duv_n, sizeof(float) * ext.n() * nc);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating uv_l");
cudaMemcpy(duv_n, uv_n, sizeof(float) * ext.n() * nc, cudaMemcpyHostToDevice);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying uv_l");
}
unsigned long long *dncps; // number of cps
cudaMalloc((void**)&dncps, sizeof(unsigned long long));
cudaMemset(dncps, 0, sizeof(unsigned long long));
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dncps");
cp_t *dcps;
cudaMalloc((void**)&dcps, 1024*1024*256); // sizeof(cp_t) * core.n());
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dcps");
cudaDeviceSynchronize();
fprintf(stderr, "calling kernel func...\n");
sweep_simplices<scope><<<gridSize, blockSize>>>(
current_timestep,
domain, core, ext, nc, duv_c, duv_n,
*dncps, dcps);
cudaDeviceSynchronize();
checkLastCudaError("[FTK-CUDA] error: sweep_simplices, kernel function");
unsigned long long ncps = 0;
cudaMemcpy(&ncps, dncps, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaMemcpyDeviceToHost, dncps");
fprintf(stderr, "ncps=%llu\n", ncps);
std::vector<cp_t> cps(ncps);
cudaMemcpy(cps.data(), dcps, sizeof(cp_t) * ncps, cudaMemcpyDeviceToHost);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaMemcpyDeviceToHost");
if (duv_c) cudaFree(duv_c);
if (duv_n) cudaFree(duv_n);
cudaFree(dncps);
cudaFree(dcps);
checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaFree");
cudaDeviceSynchronize();
auto t1 = std::chrono::high_resolution_clock::now();
float duration = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() * 1e-9;
fprintf(stderr, "exitting gpu kernel, ncps=%llu, time=%f\n", ncps, duration);
return cps;
}
std::vector<cp_t>
extract_3dclt_cuda(
int scope,
int current_timestep,
const ftk::lattice& domain,
const ftk::lattice& core,
const ftk::lattice& ext,
const int nc,
const float *uv_c,
const float *uv_n)
{
lattice4_t D(domain);
lattice4_t C(core);
lattice3_t E(ext);
if (scope == scope_interval)
return extract_3dclt<scope_interval, float>(current_timestep, D, C, E, nc, uv_c, uv_n);
else
return extract_3dclt<scope_ordinal, float>(current_timestep, D, C, E, nc, uv_c, uv_n);
}
|
ca3b6a23d2285f6d115736b845f64e37c0595ee7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/preconditioner/jacobi_kernels.hpp"
#include <ginkgo/config.hpp>
#include <ginkgo/core/base/exception_helpers.hpp>
#include "core/base/extended_float.hpp"
#include "core/components/fill_array.hpp"
#include "core/preconditioner/jacobi_utils.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/diagonal_block_manipulation.cuh"
#include "cuda/components/thread_ids.cuh"
#include "cuda/components/uninitialized_array.hpp"
#include "cuda/components/warp_blas.cuh"
#include "cuda/preconditioner/jacobi_common.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Jacobi preconditioner namespace.
* @ref Jacobi
* @ingroup jacobi
*/
namespace jacobi {
#include "common/cuda_hip/preconditioner/jacobi_generate_kernel.hpp.inc"
// clang-format off
#cmakedefine GKO_JACOBI_BLOCK_SIZE @GKO_JACOBI_BLOCK_SIZE@
// clang-format on
// make things easier for IDEs
#ifndef GKO_JACOBI_BLOCK_SIZE
#define GKO_JACOBI_BLOCK_SIZE 1
#endif
template <int warps_per_block, int max_block_size, typename ValueType,
typename IndexType>
void generate(syn::value_list<int, max_block_size>,
const matrix::Csr<ValueType, IndexType>* mtx,
remove_complex<ValueType> accuracy, ValueType* block_data,
const preconditioner::block_interleaved_storage_scheme<IndexType>&
storage_scheme,
remove_complex<ValueType>* conditioning,
precision_reduction* block_precisions,
const IndexType* block_ptrs, size_type num_blocks)
{
constexpr int subwarp_size = get_larger_power(max_block_size);
constexpr int blocks_per_warp = config::warp_size / subwarp_size;
const dim3 grid_size(ceildiv(num_blocks, warps_per_block * blocks_per_warp),
1, 1);
const dim3 block_size(subwarp_size, blocks_per_warp, warps_per_block);
if (block_precisions) {
hipLaunchKernelGGL(( kernel::adaptive_generate<max_block_size, subwarp_size, warps_per_block>)
, dim3(grid_size), dim3(block_size), 0, 0,
mtx->get_size()[0], mtx->get_const_row_ptrs(),
mtx->get_const_col_idxs(),
as_cuda_type(mtx->get_const_values()), as_cuda_type(accuracy),
as_cuda_type(block_data), storage_scheme,
as_cuda_type(conditioning), block_precisions, block_ptrs,
num_blocks);
} else {
hipLaunchKernelGGL(( kernel::generate<max_block_size, subwarp_size, warps_per_block>)
, dim3(grid_size), dim3(block_size), 0, 0,
mtx->get_size()[0], mtx->get_const_row_ptrs(),
mtx->get_const_col_idxs(),
as_cuda_type(mtx->get_const_values()), as_cuda_type(block_data),
storage_scheme, block_ptrs, num_blocks);
}
}
#define DECLARE_JACOBI_GENERATE_INSTANTIATION(ValueType, IndexType) \
void generate<config::min_warps_per_block, GKO_JACOBI_BLOCK_SIZE, \
ValueType, IndexType>( \
syn::value_list<int, GKO_JACOBI_BLOCK_SIZE>, \
const matrix::Csr<ValueType, IndexType>*, remove_complex<ValueType>, \
ValueType*, \
const preconditioner::block_interleaved_storage_scheme<IndexType>&, \
remove_complex<ValueType>*, precision_reduction*, const IndexType*, \
size_type)
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
DECLARE_JACOBI_GENERATE_INSTANTIATION);
} // namespace jacobi
} // namespace cuda
} // namespace kernels
} // namespace gko
| ca3b6a23d2285f6d115736b845f64e37c0595ee7.cu | /*******************************<GINKGO LICENSE>******************************
Copyright (c) 2017-2021, the Ginkgo authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<GINKGO LICENSE>*******************************/
#include "core/preconditioner/jacobi_kernels.hpp"
#include <ginkgo/config.hpp>
#include <ginkgo/core/base/exception_helpers.hpp>
#include "core/base/extended_float.hpp"
#include "core/components/fill_array.hpp"
#include "core/preconditioner/jacobi_utils.hpp"
#include "core/synthesizer/implementation_selection.hpp"
#include "cuda/base/config.hpp"
#include "cuda/base/math.hpp"
#include "cuda/base/types.hpp"
#include "cuda/components/cooperative_groups.cuh"
#include "cuda/components/diagonal_block_manipulation.cuh"
#include "cuda/components/thread_ids.cuh"
#include "cuda/components/uninitialized_array.hpp"
#include "cuda/components/warp_blas.cuh"
#include "cuda/preconditioner/jacobi_common.hpp"
namespace gko {
namespace kernels {
namespace cuda {
/**
* @brief The Jacobi preconditioner namespace.
* @ref Jacobi
* @ingroup jacobi
*/
namespace jacobi {
#include "common/cuda_hip/preconditioner/jacobi_generate_kernel.hpp.inc"
// clang-format off
#cmakedefine GKO_JACOBI_BLOCK_SIZE @GKO_JACOBI_BLOCK_SIZE@
// clang-format on
// make things easier for IDEs
#ifndef GKO_JACOBI_BLOCK_SIZE
#define GKO_JACOBI_BLOCK_SIZE 1
#endif
template <int warps_per_block, int max_block_size, typename ValueType,
typename IndexType>
void generate(syn::value_list<int, max_block_size>,
const matrix::Csr<ValueType, IndexType>* mtx,
remove_complex<ValueType> accuracy, ValueType* block_data,
const preconditioner::block_interleaved_storage_scheme<IndexType>&
storage_scheme,
remove_complex<ValueType>* conditioning,
precision_reduction* block_precisions,
const IndexType* block_ptrs, size_type num_blocks)
{
constexpr int subwarp_size = get_larger_power(max_block_size);
constexpr int blocks_per_warp = config::warp_size / subwarp_size;
const dim3 grid_size(ceildiv(num_blocks, warps_per_block * blocks_per_warp),
1, 1);
const dim3 block_size(subwarp_size, blocks_per_warp, warps_per_block);
if (block_precisions) {
kernel::adaptive_generate<max_block_size, subwarp_size, warps_per_block>
<<<grid_size, block_size, 0, 0>>>(
mtx->get_size()[0], mtx->get_const_row_ptrs(),
mtx->get_const_col_idxs(),
as_cuda_type(mtx->get_const_values()), as_cuda_type(accuracy),
as_cuda_type(block_data), storage_scheme,
as_cuda_type(conditioning), block_precisions, block_ptrs,
num_blocks);
} else {
kernel::generate<max_block_size, subwarp_size, warps_per_block>
<<<grid_size, block_size, 0, 0>>>(
mtx->get_size()[0], mtx->get_const_row_ptrs(),
mtx->get_const_col_idxs(),
as_cuda_type(mtx->get_const_values()), as_cuda_type(block_data),
storage_scheme, block_ptrs, num_blocks);
}
}
#define DECLARE_JACOBI_GENERATE_INSTANTIATION(ValueType, IndexType) \
void generate<config::min_warps_per_block, GKO_JACOBI_BLOCK_SIZE, \
ValueType, IndexType>( \
syn::value_list<int, GKO_JACOBI_BLOCK_SIZE>, \
const matrix::Csr<ValueType, IndexType>*, remove_complex<ValueType>, \
ValueType*, \
const preconditioner::block_interleaved_storage_scheme<IndexType>&, \
remove_complex<ValueType>*, precision_reduction*, const IndexType*, \
size_type)
GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(
DECLARE_JACOBI_GENERATE_INSTANTIATION);
} // namespace jacobi
} // namespace cuda
} // namespace kernels
} // namespace gko
|
5e1ad3d6f162b366a82594ad07a7e646beec7fd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS _THREADS_
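// _THREADS_ is presumably a placeholder substituted with the actual block size
// by the host code before this kernel source is compiled.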
__global__ void stroke(const int n,
const int imsize,
const float *ab,
const float *rnd,
int *ind,
const int grains){
const int i = blockIdx.x*THREADS + threadIdx.x;
if (i >= n*grains){
return;
}
const int k = 4*(int)floor((float)i/(float)grains);
const float dx = ab[k+2] - ab[k];
const float dy = ab[k+3] - ab[k+1];
const float r = rnd[i];
const float x = ab[k] + r*dx;
const float y = ab[k+1] + r*dy;
if (x < 0.0f || x >= 1.0f || y < 0.0f || y >= 1.0f){
ind[i] = -1;
return;
}
ind[i] = (int)(x*(float)imsize) + (int)(y*(float)imsize) * imsize;
}
| 5e1ad3d6f162b366a82594ad07a7e646beec7fd5.cu | #define THREADS _THREADS_
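// _THREADS_ is presumably a placeholder substituted with the actual block size
// by the host code before this kernel source is compiled.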
__global__ void stroke(const int n,
const int imsize,
const float *ab,
const float *rnd,
int *ind,
const int grains){
const int i = blockIdx.x*THREADS + threadIdx.x;
if (i >= n*grains){
return;
}
const int k = 4*(int)floor((float)i/(float)grains);
const float dx = ab[k+2] - ab[k];
const float dy = ab[k+3] - ab[k+1];
const float r = rnd[i];
const float x = ab[k] + r*dx;
const float y = ab[k+1] + r*dy;
if (x < 0.0f || x >= 1.0f || y < 0.0f || y >= 1.0f){
ind[i] = -1;
return;
}
ind[i] = (int)(x*(float)imsize) + (int)(y*(float)imsize) * imsize;
}
|
7e6eb0d8c649b5f08f50fc288c9f3a77a191a722.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file density_map_cuda_kernel.cu
* @author Yibo Lin
* @date Dec 2019
*/
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
inline __device__ void distributeBox2Bin(
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T bxl, T byl, T bxh, T byh,
T* buf_map
)
{
// density overflow function
auto computeDensityFunc = [](T x, T node_size, T bin_center, T bin_size){
return DREAMPLACE_STD_NAMESPACE::max(T(0.0), DREAMPLACE_STD_NAMESPACE::min(x+node_size, bin_center+bin_size/2) - DREAMPLACE_STD_NAMESPACE::max(x, bin_center-bin_size/2));
};
// x direction
int bin_index_xl = int((bxl-xl)/bin_size_x);
int bin_index_xh = int(ceil((bxh-xl)/bin_size_x))+1; // exclusive
bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x);
// y direction
int bin_index_yl = int((byl-yl-2*bin_size_y)/bin_size_y);
int bin_index_yh = int(ceil((byh-yl+2*bin_size_y)/bin_size_y))+1; // exclusive
bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y);
for (int k = bin_index_xl; k < bin_index_xh; ++k)
{
T px = computeDensityFunc(bxl, bxh - bxl, bin_center_x_tensor[k], bin_size_x);
for (int h = bin_index_yl; h < bin_index_yh; ++h)
{
T py = computeDensityFunc(byl, byh - byl, bin_center_y_tensor[h], bin_size_y);
// still area
atomicAdd(&buf_map[k*num_bins_y+h], px * py);
}
}
};
template <typename T>
__global__ void computeDensityMap(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nodes)
{
T bxl = x_tensor[i];
T byl = y_tensor[i];
T bxh = bxl + node_size_x_tensor[i];
T byh = byl + node_size_y_tensor[i];
distributeBox2Bin(
bin_center_x_tensor, bin_center_y_tensor,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
bxl, byl, bxh, byh,
density_map_tensor
);
}
}
/// @brief compute density map
/// @param x_tensor cell x locations
/// @param y_tensor cell y locations
/// @param node_size_x_tensor cell width array
/// @param node_size_y_tensor cell height array
/// @param bin_center_x_tensor bin center x locations
/// @param bin_center_y_tensor bin center y locations
/// @param num_nodes number of cells
/// @param num_bins_x number of bins in horizontal bins
/// @param num_bins_y number of bins in vertical bins
/// @param xl left boundary
/// @param yl bottom boundary
/// @param xh right boundary
/// @param yh top boundary
/// @param bin_size_x bin width
/// @param bin_size_y bin height
/// @param density_map_tensor 2D density map in column-major to write
template <typename T>
int computeDensityMapCudaLauncher(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor
)
{
int thread_count = 256;
int block_count = ceilDiv(num_nodes, thread_count);
hipLaunchKernelGGL(( computeDensityMap), dim3(block_count), dim3(thread_count), 0, 0,
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
bin_center_x_tensor, bin_center_y_tensor,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
density_map_tensor
);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computeDensityMapCudaLauncher<T>(\
const T* x_tensor, const T* y_tensor, \
const T* node_size_x_tensor, const T* node_size_y_tensor, \
const T* bin_center_x_tensor, const T* bin_center_y_tensor, \
const int num_nodes, \
const int num_bins_x, const int num_bins_y, \
const T xl, const T yl, const T xh, const T yh, \
const T bin_size_x, const T bin_size_y, \
T* density_map_tensor\
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| 7e6eb0d8c649b5f08f50fc288c9f3a77a191a722.cu | /**
* @file density_map_cuda_kernel.cu
* @author Yibo Lin
* @date Dec 2019
*/
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
inline __device__ void distributeBox2Bin(
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T bxl, T byl, T bxh, T byh,
T* buf_map
)
{
// density overflow function
auto computeDensityFunc = [](T x, T node_size, T bin_center, T bin_size){
return DREAMPLACE_STD_NAMESPACE::max(T(0.0), DREAMPLACE_STD_NAMESPACE::min(x+node_size, bin_center+bin_size/2) - DREAMPLACE_STD_NAMESPACE::max(x, bin_center-bin_size/2));
};
// x direction
int bin_index_xl = int((bxl-xl)/bin_size_x);
int bin_index_xh = int(ceil((bxh-xl)/bin_size_x))+1; // exclusive
bin_index_xl = DREAMPLACE_STD_NAMESPACE::max(bin_index_xl, 0);
bin_index_xh = DREAMPLACE_STD_NAMESPACE::min(bin_index_xh, num_bins_x);
// y direction
int bin_index_yl = int((byl-yl-2*bin_size_y)/bin_size_y);
int bin_index_yh = int(ceil((byh-yl+2*bin_size_y)/bin_size_y))+1; // exclusive
bin_index_yl = DREAMPLACE_STD_NAMESPACE::max(bin_index_yl, 0);
bin_index_yh = DREAMPLACE_STD_NAMESPACE::min(bin_index_yh, num_bins_y);
for (int k = bin_index_xl; k < bin_index_xh; ++k)
{
T px = computeDensityFunc(bxl, bxh - bxl, bin_center_x_tensor[k], bin_size_x);
for (int h = bin_index_yl; h < bin_index_yh; ++h)
{
T py = computeDensityFunc(byl, byh - byl, bin_center_y_tensor[h], bin_size_y);
// still area
atomicAdd(&buf_map[k*num_bins_y+h], px * py);
}
}
};
template <typename T>
__global__ void computeDensityMap(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nodes)
{
T bxl = x_tensor[i];
T byl = y_tensor[i];
T bxh = bxl + node_size_x_tensor[i];
T byh = byl + node_size_y_tensor[i];
distributeBox2Bin(
bin_center_x_tensor, bin_center_y_tensor,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
bxl, byl, bxh, byh,
density_map_tensor
);
}
}
/// @brief compute density map
/// @param x_tensor cell x locations
/// @param y_tensor cell y locations
/// @param node_size_x_tensor cell width array
/// @param node_size_y_tensor cell height array
/// @param bin_center_x_tensor bin center x locations
/// @param bin_center_y_tensor bin center y locations
/// @param num_nodes number of cells
/// @param num_bins_x number of bins in horizontal bins
/// @param num_bins_y number of bins in vertical bins
/// @param xl left boundary
/// @param yl bottom boundary
/// @param xh right boundary
/// @param yh top boundary
/// @param bin_size_x bin width
/// @param bin_size_y bin height
/// @param density_map_tensor 2D density map in column-major to write
template <typename T>
int computeDensityMapCudaLauncher(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor
)
{
int thread_count = 256;
int block_count = ceilDiv(num_nodes, thread_count);
computeDensityMap<<<block_count, thread_count>>>(
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
bin_center_x_tensor, bin_center_y_tensor,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
density_map_tensor
);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
template int computeDensityMapCudaLauncher<T>(\
const T* x_tensor, const T* y_tensor, \
const T* node_size_x_tensor, const T* node_size_y_tensor, \
const T* bin_center_x_tensor, const T* bin_center_y_tensor, \
const int num_nodes, \
const int num_bins_x, const int num_bins_y, \
const T xl, const T yl, const T xh, const T yh, \
const T bin_size_x, const T bin_size_y, \
T* density_map_tensor\
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
48e2eff2ef2167c88e3c5805c00c25dd415b915d.hip | // !!! This is a file automatically generated by hipify!!!
#include "my_lib_kernel.h"
#include "stdio.h"
float get_cuda(float* ans, int idx){
float t=0;
hipMemcpy(&t, ans+idx, sizeof(float), hipMemcpyDeviceToHost);
return t;
}
void set_cuda(float* ans, int idx, float t){
hipMemcpy(ans+idx, &t, sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
} | 48e2eff2ef2167c88e3c5805c00c25dd415b915d.cu | #include "my_lib_kernel.h"
#include "stdio.h"
float get_cuda(float* ans, int idx){
float t=0;
cudaMemcpy(&t, ans+idx, sizeof(float), cudaMemcpyDeviceToHost);
return t;
}
void set_cuda(float* ans, int idx, float t){
cudaMemcpy(ans+idx, &t, sizeof(float), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
} |
813a051e2319e498dbafbb60e32de8c57343f7ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define spmv_NBLOCKS 12*8*21 //22
#define spmv_BLOCK_SIZE 256
#define WARP_SIZE 32
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
hipHostMalloc(newA_ptr, paddedSize * sizeof(float));
hipHostMalloc(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
__global__ void
spmv_kernel(const float* val,
const int * cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out,int *index_rowDeli)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ volatile float partialSums[spmv_BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
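// Profiling instrumentation: count how often the first 16 threads of block 1
// touch each rowDelimiters entry.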
if (blockIdx.x==1&&threadIdx.x<16)
{atomicAdd(&index_rowDeli[threadIdx.x*(dim+1)+myRow],1);
atomicAdd(&index_rowDeli[threadIdx.x*(dim+1)+myRow+1],1);
}
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] * vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
hipSetDevice(2);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
spmv_nItems = spmv_numRows * spmv_numRows / 50; // 2% of entries will be non-zero
float maxval = 200.0;
hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float));
hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int));
hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice);
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
int *rowDeli_index=(int *)malloc(16*(spmv_numRows+1)*sizeof(int));
int *index_rowDeli;
hipMalloc((void **)&index_rowDeli,16*(spmv_numRows+1)*sizeof(int));
hipMemset(index_rowDeli,0,16*(spmv_numRows+1)*sizeof(int));
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out,index_rowDeli);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(rowDeli_index,index_rowDeli,16*(spmv_numRows+1)*sizeof(int),hipMemcpyDeviceToHost);
FILE *f2=fopen("rowDeli_D1.txt","w");
for(int ii=0;ii<16;ii++){
// fprintf(f2,"\n");
for(int jj=0;jj<spmv_numRows+1;jj++)
{
if (rowDeli_index[ii*(spmv_numRows+1)+jj] != 0)
fprintf(f2, "%d,%d ;", jj, rowDeli_index[ii*(spmv_numRows+1)+jj]);
}
fprintf(f2,"\n");
}
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
printf("Profiling results saved to \"rowDeli_D1.txt\"\n");
printf("Please use analysis.py to analysis them\n");
return 0;
}
| 813a051e2319e498dbafbb60e32de8c57343f7ae.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define spmv_NBLOCKS 12*8*21 //22
#define spmv_BLOCK_SIZE 256
#define WARP_SIZE 32
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
cudaMallocHost(newA_ptr, paddedSize * sizeof(float));
cudaMallocHost(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
__global__ void
spmv_kernel(const float* val,
const int * cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out,int *index_rowDeli)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
__shared__ volatile float partialSums[spmv_BLOCK_SIZE];
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
if (blockIdx.x == 1 && threadIdx.x < 16)
{
atomicAdd(&index_rowDeli[threadIdx.x*(dim+1)+myRow], 1);
atomicAdd(&index_rowDeli[threadIdx.x*(dim+1)+myRow+1], 1);
}
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = cols[j];
mySum += val[j] * vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
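// Editorial sketch, not part of the original file: the launch geometry assumed by
// spmv_kernel. Each block of spmv_BLOCK_SIZE threads contains spmv_BLOCK_SIZE/WARP_SIZE
// warps and every warp owns one CSR row, so main() below sizes the grid as
// ceil(numRows / warpsPerBlock).
static inline int spmvGridSize(int numRows)
{
int warpsPerBlock = spmv_BLOCK_SIZE / WARP_SIZE; // 256 / 32 = 8 rows handled per block
return (numRows + warpsPerBlock - 1) / warpsPerBlock;
}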
int main(int argc, char **argv) {
cudaSetDevice(2);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
spmv_nItems = spmv_numRows * spmv_numRows / 50; // roughly 2% of entries will be non-zero
float maxval = 200.0;
cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float));
cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int));
cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
int *rowDeli_index=(int *)malloc(16*(spmv_numRows+1)*sizeof(int));
int *index_rowDeli;
cudaMalloc((void **)&index_rowDeli,16*(spmv_numRows+1)*sizeof(int));
cudaMemset(index_rowDeli,0,16*(spmv_numRows+1)*sizeof(int));
spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out,index_rowDeli);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(rowDeli_index,index_rowDeli,16*(spmv_numRows+1)*sizeof(int),cudaMemcpyDeviceToHost);
FILE *f2=fopen("rowDeli_D1.txt","w");
for(int ii=0;ii<16;ii++){
// fprintf(f2,"\n");
for(int jj=0;jj<spmv_numRows+1;jj++)
{
if (rowDeli_index[ii*(spmv_numRows+1)+jj] != 0)
fprintf(f2, "%d,%d ;", jj, rowDeli_index[ii*(spmv_numRows+1)+jj]);
}
fprintf(f2,"\n");
}
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
printf("Profiling results saved to \"rowDeli_D1.txt\"\n");
printf("Please use analysis.py to analysis them\n");
return 0;
}
|
997fa1c65152bfecf49d1d7e2b868840b71d9901.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void elevar_al_cuadrado(float * d_salida, float * d_entrada){
int idx = threadIdx.x;
float f = d_entrada[idx];
d_salida[idx] = f*f;
} | 997fa1c65152bfecf49d1d7e2b868840b71d9901.cu | #include "includes.h"
__global__ void elevar_al_cuadrado(float * d_salida, float * d_entrada){
int idx = threadIdx.x;
float f = d_entrada[idx];
d_salida[idx] = f*f;
} |
1436aa5d1d7fcc9006358b3b127f2b83be8deac2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cudapoa_kernels.cuh"
#include <stdio.h>
namespace claragenomics
{
namespace cudapoa
{
/**
* @brief Device function for adding a new alignment to the partial order alignment graph.
*
* @param[out] new_node_count Number of nodes in graph after update
* @param[in/out] nodes Device buffer with unique nodes in graph
* @param[in] node_count Number of nodes in graph
* @param[in] node_alignments Device buffer with alignment nodes per node in graph
* @param[in] node_alignment_count Device buffer with number of aligned nodes
* @param[in] incoming_edges Device buffer with incoming edges per node
* @param[in] incoming_edge_count Device buffer with number of incoming edges per node
* @param[in] outgoing_edges Device buffer with outgoing edges per node
* @param[in] outgoing_edge_count Device buffer with number of outgoing edges per node
* @param[in] incoming_edge_w Device buffer with weight of incoming edges
* @param[in] outgoing_edge_w Device buffer with weight of outgoing edges
* @param[in] alignment_length Total length of new alignment
* @param[in] graph Device scratch space with sorted graph
* @param[in] alignment_graph Device buffer with nodes from graph in alignment
* @param[in] read Device scratch space with sequence
* @param[in] alignment_read Device buffer with bases from read in alignment
* @param[in] node_coverage_counts Device buffer with coverage of each node in graph
* @param[in] base_weights Device buffer with weight of each node in read
* @param[in] sequence_begin_nodes_ids Device buffer with beginning node of each sequence
* @param[in] outgoing_edges_coverage Device buffer with coverage of each edge in graph
* @param[in] outgoing_edges_coverage_count Device buffer with coverage count of each edge in graph
* @param[in] s Current sequence id
* @param[in] max_sequences_per_poa Maximum sequences allowed in a graph
*
* @return Status code for any errors encountered.
*/
template <bool msa = false>
__device__
uint8_t
addAlignmentToGraph(uint16_t& new_node_count,
uint8_t* nodes,
uint16_t node_count,
uint16_t* node_alignments, uint16_t* node_alignment_count,
uint16_t* incoming_edges, uint16_t* incoming_edge_count,
uint16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* outgoing_edge_w,
uint16_t alignment_length,
uint16_t* graph,
int16_t* alignment_graph,
uint8_t* read,
int16_t* alignment_read,
uint16_t* node_coverage_counts,
int8_t* base_weights,
uint16_t* sequence_begin_nodes_ids,
uint16_t* outgoing_edges_coverage,
uint16_t* outgoing_edges_coverage_count,
uint16_t s,
uint32_t max_sequences_per_poa)
{
//printf("Running addition for alignment %d\n", alignment_length);
int16_t head_node_id = -1;
int16_t curr_node_id = -1;
uint16_t prev_weight = 0;
// Basic algorithm is to iterate through the alignment of the read.
// For each position in that alignment -
// if it's an insert in the read
// add a new node
// if it is aligned
// check if node base matches read base. if so, move on.
// if node base doesn't match, check other aligned nodes
// if none of the other aligned nodes match, add new node
// else use one of aligned nodes and move on.
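// Worked example (editorial; node ids are hypothetical): if the graph spells "ACG" as
// nodes 0->1->2 and the read "ATG" is aligned base-for-base, then 'A' matches node 0
// (reuse it, bump its coverage), 'T' aligns to node 1 but the bases differ and node 1
// has no aligned node carrying 'T', so a new node is created and cross-linked as an
// alignment of node 1, and 'G' matches node 2. Edges between the consecutively used or
// created nodes are added along the read, or their weights increased if they already exist.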
for (int16_t pos = alignment_length - 1; pos >= 0; pos--)
{
bool new_node = false;
int16_t read_pos = alignment_read[pos];
// Case where base in read in an insert.
if (read_pos != -1)
{
int8_t NODE_WEIGHT = base_weights[read_pos];
//printf("%c ", read[read_pos]);
uint8_t read_base = read[read_pos];
int16_t graph_node_id = alignment_graph[pos];
if (graph_node_id == -1)
{
// No alignment node found in graph.
// Create new node.
curr_node_id = node_count++;
if (node_count >= CUDAPOA_MAX_NODES_PER_WINDOW)
{
return static_cast<uint8_t>(StatusType::node_count_exceeded_maximum_graph_size);
}
//printf("create new node %d\n", curr_node_id);
new_node = true;
nodes[curr_node_id] = read_base;
outgoing_edge_count[curr_node_id] = 0;
incoming_edge_count[curr_node_id] = 0;
node_alignment_count[curr_node_id] = 0;
node_coverage_counts[curr_node_id] = 0;
}
else
{
// Get base information for aligned node in graph.
uint8_t graph_base = nodes[graph_node_id];
//printf("graph base %c\n", graph_base);
// If bases match, then set current node id to graph node id.
if (graph_base == read_base)
{
//printf("graph and read base are same\n");
curr_node_id = graph_node_id;
}
else
{
// Since bases don't match, iterate through all aligned nodes of
// graph node, and check against their bases. If a base matches,
// then set the current node as that aligned node.
uint16_t num_aligned_node = node_alignment_count[graph_node_id];
//printf("aligned nodes are %d\n", num_aligned_node);
int16_t aligned_node_id = -1;
//printf("looping through alignments\n");
for (uint16_t n = 0; n < num_aligned_node; n++)
{
uint16_t aid = node_alignments[graph_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + n];
if (nodes[aid] == read_base)
{
aligned_node_id = aid;
break;
}
}
if (aligned_node_id != -1)
{
//printf("found aligned node %d\n", aligned_node_id);
curr_node_id = aligned_node_id;
}
else
{
// However, if none of the nodes in the aligned list match either,
// then create a new node and update the graph node (+ aligned nodes)
// with information about this new node since it also becomes an aligned
// node to the others.
new_node = true;
curr_node_id = node_count++;
if (node_count >= CUDAPOA_MAX_NODES_PER_WINDOW)
{
return static_cast<uint8_t>(StatusType::node_count_exceeded_maximum_graph_size);
}
//printf("create new node %d\n", curr_node_id);
nodes[curr_node_id] = read_base;
outgoing_edge_count[curr_node_id] = 0;
incoming_edge_count[curr_node_id] = 0;
node_alignment_count[curr_node_id] = 0;
node_coverage_counts[curr_node_id] = 0;
uint16_t new_node_alignments = 0;
for (uint16_t n = 0; n < num_aligned_node; n++)
{
uint16_t aid = node_alignments[graph_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + n];
uint16_t aid_count = node_alignment_count[aid];
node_alignments[aid * CUDAPOA_MAX_NODE_ALIGNMENTS + aid_count] = curr_node_id;
node_alignment_count[aid] = aid_count + 1;
node_alignments[curr_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + new_node_alignments] = aid;
new_node_alignments++;
}
node_alignments[graph_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + num_aligned_node] = curr_node_id;
node_alignment_count[graph_node_id] = num_aligned_node + 1;
node_alignments[curr_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + new_node_alignments] = graph_node_id;
new_node_alignments++;
node_alignment_count[curr_node_id] = new_node_alignments;
}
}
}
if (new_node)
{
//printf("new node %d\n", curr_node_id);
}
// for msa generation
if (msa && (read_pos == 0))
{
//begin node of the sequence, add its node_id (curr_node_id) to sequence_begin_nodes_ids
*sequence_begin_nodes_ids = curr_node_id;
// printf("adding sequence_begin_nodes_ids = %d\n", curr_node_id);
}
// Create new edges if necessary.
if (head_node_id != -1)
{
bool edge_exists = false;
uint16_t in_count = incoming_edge_count[curr_node_id];
for (uint16_t e = 0; e < in_count; e++)
{
if (incoming_edges[curr_node_id * CUDAPOA_MAX_NODE_EDGES + e] == head_node_id)
{
edge_exists = true;
incoming_edge_w[curr_node_id * CUDAPOA_MAX_NODE_EDGES + e] += (prev_weight + NODE_WEIGHT);
//printf("Update existing node from %d to %d with weight %d\n", head_node_id, curr_node_id, incoming_edge_w[curr_node_id * CUDAPOA_MAX_NODE_EDGES + e]);
}
}
if (!edge_exists)
{
incoming_edges[curr_node_id * CUDAPOA_MAX_NODE_EDGES + in_count] = head_node_id;
incoming_edge_w[curr_node_id * CUDAPOA_MAX_NODE_EDGES + in_count] = prev_weight + NODE_WEIGHT;
incoming_edge_count[curr_node_id] = in_count + 1;
uint16_t out_count = outgoing_edge_count[head_node_id];
outgoing_edges[head_node_id * CUDAPOA_MAX_NODE_EDGES + out_count] = curr_node_id;
if (msa)
{
outgoing_edges_coverage_count[head_node_id * CUDAPOA_MAX_NODE_EDGES + out_count] = 1;
outgoing_edges_coverage[(head_node_id * CUDAPOA_MAX_NODE_EDGES + out_count) * max_sequences_per_poa] = s;
}
outgoing_edge_count[head_node_id] = out_count + 1;
//printf("Created new edge %d to %d with weight %d\n", head_node_id, curr_node_id, prev_weight + NODE_WEIGHT);
if (out_count + 1 >= CUDAPOA_MAX_NODE_EDGES || in_count + 1 >= CUDAPOA_MAX_NODE_EDGES)
{
return static_cast<uint8_t>(StatusType::edge_count_exceeded_maximum_graph_size);
//printf("exceeded max edge count\n");
}
}
else if (msa) //if edge exists and for msa generation
{
uint16_t out_count = outgoing_edge_count[head_node_id];
for (uint16_t e = 0; e < out_count; e++)
{
if (outgoing_edges[head_node_id * CUDAPOA_MAX_NODE_EDGES + e] == curr_node_id)
{
uint16_t out_edge_coverage_count = outgoing_edges_coverage_count[head_node_id * CUDAPOA_MAX_NODE_EDGES + e];
outgoing_edges_coverage[(head_node_id * CUDAPOA_MAX_NODE_EDGES + e) * max_sequences_per_poa + out_edge_coverage_count] = s;
outgoing_edges_coverage_count[head_node_id * CUDAPOA_MAX_NODE_EDGES + e] = out_edge_coverage_count + 1;
break;
}
}
}
}
head_node_id = curr_node_id;
// If a node is seen within a graph, then it's part of some
// read, hence its coverage is incremented by 1.
node_coverage_counts[head_node_id]++;
prev_weight = NODE_WEIGHT;
}
}
//printf("final size %d\n", node_count);
new_node_count = node_count;
return static_cast<uint8_t>(StatusType::success);
}
// kernel that calls the addAlignmentToGraph device function
__global__ void addAlignmentKernel(uint8_t* nodes,
uint16_t* node_count,
uint16_t* node_alignments, uint16_t* node_alignment_count,
uint16_t* incoming_edges, uint16_t* incoming_edge_count,
uint16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* outgoing_edge_w,
uint16_t* alignment_length,
uint16_t* graph,
int16_t* alignment_graph,
uint8_t* read,
int16_t* alignment_read,
uint16_t* node_coverage_counts,
int8_t* base_weights,
uint16_t* sequence_begin_nodes_ids,
uint16_t* outgoing_edges_coverage,
uint16_t* outgoing_edges_coverage_count,
uint16_t s,
uint32_t max_sequences_per_poa)
{
// all pointers will be allocated in unified memory visible to both host and device
uint16_t new_node_count;
uint8_t error_code = addAlignmentToGraph(new_node_count, nodes,
*node_count,
node_alignments, node_alignment_count,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, outgoing_edge_w,
*alignment_length,
graph,
alignment_graph,
read,
alignment_read,
node_coverage_counts,
base_weights,
sequence_begin_nodes_ids,
outgoing_edges_coverage,
outgoing_edges_coverage_count,
s,
max_sequences_per_poa);
*node_count = new_node_count;
}
// Host function that calls the kernel
void addAlignment(uint8_t* nodes,
uint16_t* node_count,
uint16_t* node_alignments, uint16_t* node_alignment_count,
uint16_t* incoming_edges, uint16_t* incoming_edge_count,
uint16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* outgoing_edge_w,
uint16_t* alignment_length,
uint16_t* graph,
int16_t* alignment_graph,
uint8_t* read,
int16_t* alignment_read,
uint16_t* node_coverage_counts,
int8_t* base_weights,
uint16_t* sequence_begin_nodes_ids,
uint16_t* outgoing_edges_coverage,
uint16_t* outgoing_edges_coverage_count,
uint16_t s,
uint32_t max_sequences_per_poa)
{
hipLaunchKernelGGL(( addAlignmentKernel), dim3(1), dim3(1), 0, 0, nodes,
node_count,
node_alignments, node_alignment_count,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, outgoing_edge_w,
alignment_length,
graph,
alignment_graph,
read,
alignment_read,
node_coverage_counts,
base_weights,
sequence_begin_nodes_ids,
outgoing_edges_coverage,
outgoing_edges_coverage_count,
s,
max_sequences_per_poa);
}
} // namespace cudapoa
} // namespace claragenomics
| 1436aa5d1d7fcc9006358b3b127f2b83be8deac2.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "cudapoa_kernels.cuh"
#include <stdio.h>
namespace claragenomics
{
namespace cudapoa
{
/**
* @brief Device function for adding a new alignment to the partial order alignment graph.
*
* @param[out] new_node_count Number of nodes in graph after update
* @param[in/out] nodes Device buffer with unique nodes in graph
* @param[in] node_count Number of nodes in graph
* @param[in] node_alignments Device buffer with alignment nodes per node in graph
* @param[in] node_alignment_count Device buffer with number of aligned nodes
* @param[in] incoming_edges Device buffer with incoming edges per node
* @param[in] incoming_edge_count Device buffer with number of incoming edges per node
* @param[in] outgoing_edges Device buffer with outgoing edges per node
* @param[in] outgoing_edge_count Device buffer with number of outgoing edges per node
* @param[in] incoming_edge_w Device buffer with weight of incoming edges
* @param[in] outgoing_edge_w Device buffer with weight of outgoing edges
* @param[in] alignment_length Total length of new alignment
* @param[in] graph Device scratch space with sorted graph
* @param[in] alignment_graph Device buffer with nodes from graph in alignment
* @param[in] read Device scratch space with sequence
* @param[in] alignment_read Device buffer with bases from read in alignment
* @param[in] node_coverage_counts Device buffer with coverage of each node in graph
* @param[in] base_weights Device buffer with weight of each node in read
* @param[in] sequence_begin_nodes_ids Device buffer with beginning node of each sequence
* @param[in] outgoing_edges_coverage Device buffer with coverage of each edge in graph
* @param[in] outgoing_edges_coverage_count Device buffer with coverage count of each edge in graph
* @param[in] s Current sequence id
* @param[in] max_sequences_per_poa Maximum sequences allowed in a graph
*
* @return Status code for any errors encountered.
*/
template <bool msa = false>
__device__
uint8_t
addAlignmentToGraph(uint16_t& new_node_count,
uint8_t* nodes,
uint16_t node_count,
uint16_t* node_alignments, uint16_t* node_alignment_count,
uint16_t* incoming_edges, uint16_t* incoming_edge_count,
uint16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* outgoing_edge_w,
uint16_t alignment_length,
uint16_t* graph,
int16_t* alignment_graph,
uint8_t* read,
int16_t* alignment_read,
uint16_t* node_coverage_counts,
int8_t* base_weights,
uint16_t* sequence_begin_nodes_ids,
uint16_t* outgoing_edges_coverage,
uint16_t* outgoing_edges_coverage_count,
uint16_t s,
uint32_t max_sequences_per_poa)
{
//printf("Running addition for alignment %d\n", alignment_length);
int16_t head_node_id = -1;
int16_t curr_node_id = -1;
uint16_t prev_weight = 0;
// Basic algorithm is to iterate through the alignment of the read.
// For each position in that alignment -
// if it's an insert in the read
// add a new node
// if it is aligned
// check if node base matches read base. if so, move on.
// if node base doesn't match, check other aligned nodes
// if none of the other aligned nodes match, add new node
// else use one of aligned nodes and move on.
for (int16_t pos = alignment_length - 1; pos >= 0; pos--)
{
bool new_node = false;
int16_t read_pos = alignment_read[pos];
// Case where base in read in an insert.
if (read_pos != -1)
{
int8_t NODE_WEIGHT = base_weights[read_pos];
//printf("%c ", read[read_pos]);
uint8_t read_base = read[read_pos];
int16_t graph_node_id = alignment_graph[pos];
if (graph_node_id == -1)
{
// No alignment node found in graph.
// Create new node.
curr_node_id = node_count++;
if (node_count >= CUDAPOA_MAX_NODES_PER_WINDOW)
{
return static_cast<uint8_t>(StatusType::node_count_exceeded_maximum_graph_size);
}
//printf("create new node %d\n", curr_node_id);
new_node = true;
nodes[curr_node_id] = read_base;
outgoing_edge_count[curr_node_id] = 0;
incoming_edge_count[curr_node_id] = 0;
node_alignment_count[curr_node_id] = 0;
node_coverage_counts[curr_node_id] = 0;
}
else
{
// Get base information for aligned node in graph.
uint8_t graph_base = nodes[graph_node_id];
//printf("graph base %c\n", graph_base);
// If bases match, then set current node id to graph node id.
if (graph_base == read_base)
{
//printf("graph and read base are same\n");
curr_node_id = graph_node_id;
}
else
{
// Since bases don't match, iterate through all aligned nodes of
// graph node, and check against their bases. If a base matches,
// then set the current node as that aligned node.
uint16_t num_aligned_node = node_alignment_count[graph_node_id];
//printf("aligned nodes are %d\n", num_aligned_node);
int16_t aligned_node_id = -1;
//printf("looping through alignments\n");
for (uint16_t n = 0; n < num_aligned_node; n++)
{
uint16_t aid = node_alignments[graph_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + n];
if (nodes[aid] == read_base)
{
aligned_node_id = aid;
break;
}
}
if (aligned_node_id != -1)
{
//printf("found aligned node %d\n", aligned_node_id);
curr_node_id = aligned_node_id;
}
else
{
// However, if none of the nodes in the aligned list match either,
// then create a new node and update the graph node (+ aligned nodes)
// with information about this new node since it also becomes an aligned
// node to the others.
new_node = true;
curr_node_id = node_count++;
if (node_count >= CUDAPOA_MAX_NODES_PER_WINDOW)
{
return static_cast<uint8_t>(StatusType::node_count_exceeded_maximum_graph_size);
}
//printf("create new node %d\n", curr_node_id);
nodes[curr_node_id] = read_base;
outgoing_edge_count[curr_node_id] = 0;
incoming_edge_count[curr_node_id] = 0;
node_alignment_count[curr_node_id] = 0;
node_coverage_counts[curr_node_id] = 0;
uint16_t new_node_alignments = 0;
for (uint16_t n = 0; n < num_aligned_node; n++)
{
uint16_t aid = node_alignments[graph_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + n];
uint16_t aid_count = node_alignment_count[aid];
node_alignments[aid * CUDAPOA_MAX_NODE_ALIGNMENTS + aid_count] = curr_node_id;
node_alignment_count[aid] = aid_count + 1;
node_alignments[curr_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + new_node_alignments] = aid;
new_node_alignments++;
}
node_alignments[graph_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + num_aligned_node] = curr_node_id;
node_alignment_count[graph_node_id] = num_aligned_node + 1;
node_alignments[curr_node_id * CUDAPOA_MAX_NODE_ALIGNMENTS + new_node_alignments] = graph_node_id;
new_node_alignments++;
node_alignment_count[curr_node_id] = new_node_alignments;
}
}
}
if (new_node)
{
//printf("new node %d\n", curr_node_id);
}
// for msa generation
if (msa && (read_pos == 0))
{
//begin node of the sequence, add its node_id (curr_node_id) to sequence_begin_nodes_ids
*sequence_begin_nodes_ids = curr_node_id;
// printf("adding sequence_begin_nodes_ids = %d\n", curr_node_id);
}
// Create new edges if necessary.
if (head_node_id != -1)
{
bool edge_exists = false;
uint16_t in_count = incoming_edge_count[curr_node_id];
for (uint16_t e = 0; e < in_count; e++)
{
if (incoming_edges[curr_node_id * CUDAPOA_MAX_NODE_EDGES + e] == head_node_id)
{
edge_exists = true;
incoming_edge_w[curr_node_id * CUDAPOA_MAX_NODE_EDGES + e] += (prev_weight + NODE_WEIGHT);
//printf("Update existing node from %d to %d with weight %d\n", head_node_id, curr_node_id, incoming_edge_w[curr_node_id * CUDAPOA_MAX_NODE_EDGES + e]);
}
}
if (!edge_exists)
{
incoming_edges[curr_node_id * CUDAPOA_MAX_NODE_EDGES + in_count] = head_node_id;
incoming_edge_w[curr_node_id * CUDAPOA_MAX_NODE_EDGES + in_count] = prev_weight + NODE_WEIGHT;
incoming_edge_count[curr_node_id] = in_count + 1;
uint16_t out_count = outgoing_edge_count[head_node_id];
outgoing_edges[head_node_id * CUDAPOA_MAX_NODE_EDGES + out_count] = curr_node_id;
if (msa)
{
outgoing_edges_coverage_count[head_node_id * CUDAPOA_MAX_NODE_EDGES + out_count] = 1;
outgoing_edges_coverage[(head_node_id * CUDAPOA_MAX_NODE_EDGES + out_count) * max_sequences_per_poa] = s;
}
outgoing_edge_count[head_node_id] = out_count + 1;
//printf("Created new edge %d to %d with weight %d\n", head_node_id, curr_node_id, prev_weight + NODE_WEIGHT);
if (out_count + 1 >= CUDAPOA_MAX_NODE_EDGES || in_count + 1 >= CUDAPOA_MAX_NODE_EDGES)
{
return static_cast<uint8_t>(StatusType::edge_count_exceeded_maximum_graph_size);
//printf("exceeded max edge count\n");
}
}
else if (msa) //if edge exists and for msa generation
{
uint16_t out_count = outgoing_edge_count[head_node_id];
for (uint16_t e = 0; e < out_count; e++)
{
if (outgoing_edges[head_node_id * CUDAPOA_MAX_NODE_EDGES + e] == curr_node_id)
{
uint16_t out_edge_coverage_count = outgoing_edges_coverage_count[head_node_id * CUDAPOA_MAX_NODE_EDGES + e];
outgoing_edges_coverage[(head_node_id * CUDAPOA_MAX_NODE_EDGES + e) * max_sequences_per_poa + out_edge_coverage_count] = s;
outgoing_edges_coverage_count[head_node_id * CUDAPOA_MAX_NODE_EDGES + e] = out_edge_coverage_count + 1;
break;
}
}
}
}
head_node_id = curr_node_id;
// If a node is seen within a graph, then it's part of some
// read, hence its coverage is incremented by 1.
node_coverage_counts[head_node_id]++;
prev_weight = NODE_WEIGHT;
}
}
//printf("final size %d\n", node_count);
new_node_count = node_count;
return static_cast<uint8_t>(StatusType::success);
}
// kernel that calls the addAlignmentToGraph device function
__global__ void addAlignmentKernel(uint8_t* nodes,
uint16_t* node_count,
uint16_t* node_alignments, uint16_t* node_alignment_count,
uint16_t* incoming_edges, uint16_t* incoming_edge_count,
uint16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* outgoing_edge_w,
uint16_t* alignment_length,
uint16_t* graph,
int16_t* alignment_graph,
uint8_t* read,
int16_t* alignment_read,
uint16_t* node_coverage_counts,
int8_t* base_weights,
uint16_t* sequence_begin_nodes_ids,
uint16_t* outgoing_edges_coverage,
uint16_t* outgoing_edges_coverage_count,
uint16_t s,
uint32_t max_sequences_per_poa)
{
// all pointers will be allocated in unified memory visible to both host and device
uint16_t new_node_count;
uint8_t error_code = addAlignmentToGraph(new_node_count, nodes,
*node_count,
node_alignments, node_alignment_count,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, outgoing_edge_w,
*alignment_length,
graph,
alignment_graph,
read,
alignment_read,
node_coverage_counts,
base_weights,
sequence_begin_nodes_ids,
outgoing_edges_coverage,
outgoing_edges_coverage_count,
s,
max_sequences_per_poa);
*node_count = new_node_count;
}
// Host function that calls the kernel
void addAlignment(uint8_t* nodes,
uint16_t* node_count,
uint16_t* node_alignments, uint16_t* node_alignment_count,
uint16_t* incoming_edges, uint16_t* incoming_edge_count,
uint16_t* outgoing_edges, uint16_t* outgoing_edge_count,
uint16_t* incoming_edge_w, uint16_t* outgoing_edge_w,
uint16_t* alignment_length,
uint16_t* graph,
int16_t* alignment_graph,
uint8_t* read,
int16_t* alignment_read,
uint16_t* node_coverage_counts,
int8_t* base_weights,
uint16_t* sequence_begin_nodes_ids,
uint16_t* outgoing_edges_coverage,
uint16_t* outgoing_edges_coverage_count,
uint16_t s,
uint32_t max_sequences_per_poa)
{
addAlignmentKernel<<<1, 1>>>(nodes,
node_count,
node_alignments, node_alignment_count,
incoming_edges, incoming_edge_count,
outgoing_edges, outgoing_edge_count,
incoming_edge_w, outgoing_edge_w,
alignment_length,
graph,
alignment_graph,
read,
alignment_read,
node_coverage_counts,
base_weights,
sequence_begin_nodes_ids,
outgoing_edges_coverage,
outgoing_edges_coverage_count,
s,
max_sequences_per_poa);
}
} // namespace cudapoa
} // namespace claragenomics
|
1e99c61770e65751f1504e4af2ffed1fca4738ba.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `all`
#include <cudf/detail/reduction_functions.hpp>
#include "simple_hip.cuh"
std::unique_ptr<cudf::scalar> cudf::experimental::reduction::all(
column_view const& col, cudf::data_type const output_dtype,
rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::BOOL8), "all() operation can be applied with output type `bool8` only");
return cudf::experimental::reduction::min(col, cudf::data_type(cudf::BOOL8), mr, stream);
}
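// Editorial note, not part of the original file: the delegation above relies on the
// boolean identity that, over BOOL8 values, all() equals the minimum of the column
// (min{1, 1, 0} is 0, matching all({true, true, false})); any() is the dual case and
// reduces to max() in the same way.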
| 1e99c61770e65751f1504e4af2ffed1fca4738ba.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `all`
#include <cudf/detail/reduction_functions.hpp>
#include "simple.cuh"
std::unique_ptr<cudf::scalar> cudf::experimental::reduction::all(
column_view const& col, cudf::data_type const output_dtype,
rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::BOOL8), "all() operation can be applied with output type `bool8` only");
return cudf::experimental::reduction::min(col, cudf::data_type(cudf::BOOL8), mr, stream);
}
|
ceb112a8d9d271aed99e929e88d11218b8cf42fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
__global__ void vec_add(float *A, float *B, float* C, int size)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if(index<size)
C[index] = A[index] + B[index];
} | ceb112a8d9d271aed99e929e88d11218b8cf42fe.cu | #include "includes.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
__global__ void vec_add(float *A, float *B, float* C, int size)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if(index<size)
C[index] = A[index] + B[index];
} |
20bfefefed957fa1ede9cb0bb5bd53bc3f4489f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "local_contrast_subtractive_2d_layer_updater_cuda.h"
#include "../local_contrast_subtractive_layer.h"
#include "../neural_network_exception.h"
#include "../nn_types.h"
#include "util_cuda.h"
namespace nnforge
{
namespace cuda
{
__global__ void local_contrast_subtractive_2d_blur_horizontal_upd_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_width,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_width; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
template<int WINDOW_WIDTH>
__global__ void local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
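// Editorial sketch, not part of the original file: a host-side reference for one output
// sample of the symmetric 1D blur pass implemented by the blur kernels in this file.
// weights[0] is the centre tap, weights[i] is shared by the two taps at distance i, and
// neighbours that fall outside the row are mirrored back in with the edge sample
// repeated, matching the pointer bouncing done in the kernels.
inline float blur_row_reference(const float *row, int width, int x, const float *weights, int window)
{
float res = row[x] * weights[0];
for(int i = 1; i < window; ++i)
{
int low = x - i;
int high = x + i;
if (low < 0)
low = -low - 1; // ... 2 1 0 | 0 1 2 ... (edge repeated)
if (high > width - 1)
high = 2 * width - 1 - high; // ... w-2 w-1 | w-1 w-2 ... (edge repeated)
res += (row[low] + row[high]) * weights[i];
}
return res;
}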
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_upd_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_height,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_height; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
template<int WINDOW_HEIGHT>
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_HEIGHT; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
__global__ void local_contrast_subtractive_2d_copy_unaffected_upd_kernel(
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict unaffected_feature_map_list,
int input_feature_map_count,
int unaffected_feature_map_count,
int elem_count_per_fature_map,
int entry_count)
{
int elem_id = blockIdx.x * blockDim.x + threadIdx.x;
int unaffected_feature_map_index = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (elem_id < elem_count_per_fature_map) && (unaffected_feature_map_index < unaffected_feature_map_count) && (entry_id < entry_count);
if (in_bounds)
{
int unaffected_feature_map_id = unaffected_feature_map_list[unaffected_feature_map_index];
int offset = (entry_id * input_feature_map_count + unaffected_feature_map_id) * elem_count_per_fature_map + elem_id;
output[offset] = original_input[offset];
}
}
local_contrast_subtractive_2d_layer_updater_cuda::local_contrast_subtractive_2d_layer_updater_cuda()
{
}
local_contrast_subtractive_2d_layer_updater_cuda::~local_contrast_subtractive_2d_layer_updater_cuda()
{
}
void local_contrast_subtractive_2d_layer_updater_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[0])
{
case 1:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<1>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<2>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<3>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<4>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<5>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<6>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<7>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<8>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<9>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<10>), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id, *input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_horizontal_upd_kernel), dim3(kernel_1st_dims.first), dim3(kernel_1st_dims.second), 0, stream_id,
*input_buffers[0],
*temporary_working_per_entry_buffer,
*schema_data[0],
*schema_data[1],
output_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[0],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
switch(half_window_sizes[1])
{
case 1:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<1>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<2>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<3>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<4>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<5>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<6>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<7>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<8>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<9>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<10>), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id, *temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
hipLaunchKernelGGL(( local_contrast_subtractive_2d_blur_vertical_and_subtract_upd_kernel), dim3(kernel_2nd_dims.first), dim3(kernel_2nd_dims.second), 0, stream_id,
*temporary_working_per_entry_buffer,
*input_buffers[0],
*output_buffer,
*schema_data[0],
*schema_data[2],
output_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[1],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
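// Feature maps outside the affected list only need an explicit copy when the
// layer is not running in place (i.e. input and output buffers differ).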
if ((unaffected_feature_map_count > 0) && ((const float *)*input_buffers[0] != (const float *)*output_buffer))
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_elem_count_per_feature_map,
unaffected_feature_map_count,
entry_count);
hipLaunchKernelGGL(( local_contrast_subtractive_2d_copy_unaffected_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_buffers[0],
*output_buffer,
*schema_data[3],
output_configuration_specific.feature_map_count,
unaffected_feature_map_count,
output_elem_count_per_feature_map,
entry_count);
}
}
void local_contrast_subtractive_2d_layer_updater_cuda::updater_configured()
{
nnforge_shared_ptr<const local_contrast_subtractive_layer> layer_derived = nnforge_dynamic_pointer_cast<const local_contrast_subtractive_layer>(layer_schema);
affected_feature_map_count = static_cast<int>(layer_derived->feature_maps_affected.size());
unaffected_feature_map_count = static_cast<int>(layer_derived->feature_maps_unaffected.size());
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
half_window_sizes.push_back(static_cast<int>((*it + 1) >> 1));
central_mult = 1.0F - (2.0F * layer_derived->window_weights_list[0][0] * layer_derived->window_weights_list[1][0]);
}
size_t local_contrast_subtractive_2d_layer_updater_cuda::get_temporary_working_per_entry_buffer_size(const layer_action& action) const
{
return output_elem_count_per_feature_map * affected_feature_map_count * sizeof(float);
}
int local_contrast_subtractive_2d_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const
{
return 0;
}
}
}
| 20bfefefed957fa1ede9cb0bb5bd53bc3f4489f6.cu | /*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "local_contrast_subtractive_2d_layer_updater_cuda.h"
#include "../local_contrast_subtractive_layer.h"
#include "../neural_network_exception.h"
#include "../nn_types.h"
#include "util_cuda.h"
namespace nnforge
{
namespace cuda
{
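// Horizontal pass of the separable blur: each thread accumulates a weighted
// symmetric window along x for one pixel of one affected feature map,
// reflecting the read pointers back into the row at the image borders.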
__global__ void local_contrast_subtractive_2d_blur_horizontal_upd_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_width,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_width; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
template<int WINDOW_WIDTH>
__global__ void local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel(
const float * __restrict input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)(((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
current_weights++;
if (i < x + 1)
current_input_low--;
if (i > x + 1)
current_input_low++;
if (i < width - x)
current_input_high++;
if (i > width - x)
current_input_high--;
res += (*current_input_low + *current_input_high) * *current_weights;
}
output[(z * height + y) * width + x] = res;
}
}
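// Vertical pass: blurs the horizontally blurred buffer along y and subtracts
// the result from the original input to produce the local-contrast output.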
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_upd_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int window_height,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll 4
for(int i = 1; i < window_height; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
template<int WINDOW_HEIGHT>
__global__ void local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel(
const float * __restrict input,
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict affected_feature_map_list,
const float * __restrict weights,
int input_feature_map_count,
int affected_feature_map_count,
int width,
int height,
int entry_count)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
int entry_id = z / affected_feature_map_count;
bool in_bounds = (x < width) && (y < height) && (entry_id < entry_count);
if (in_bounds)
{
int affected_feature_map_index = z - (entry_id * affected_feature_map_count);
int affected_feature_map_id = affected_feature_map_list[affected_feature_map_index];
const float * current_input = input + (int)((z * height + y) * width + x);
const float * current_input_low = current_input;
const float * current_input_high = current_input;
const float * current_weights = weights;
float res = *current_input * *current_weights;
#pragma unroll
for(int i = 1; i < WINDOW_HEIGHT; ++i)
{
current_weights++;
if (i < y + 1)
current_input_low -= width;
if (i > y + 1)
current_input_low += width;
if (i < height - y)
current_input_high += width;
if (i > height - y)
current_input_high -= width;
res += (*current_input_low + *current_input_high) * *current_weights;
}
int offset = ((entry_id * input_feature_map_count + affected_feature_map_id) * height + y) * width + x;
output[offset] = original_input[offset] - res;
}
}
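// Feature maps that are not in the affected list are copied through unchanged.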
__global__ void local_contrast_subtractive_2d_copy_unaffected_upd_kernel(
const float * __restrict original_input,
float * __restrict output,
const unsigned int * __restrict unaffected_feature_map_list,
int input_feature_map_count,
int unaffected_feature_map_count,
int elem_count_per_feature_map,
int entry_count)
{
int elem_id = blockIdx.x * blockDim.x + threadIdx.x;
int unaffected_feature_map_index = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (elem_id < elem_count_per_feature_map) && (unaffected_feature_map_index < unaffected_feature_map_count) && (entry_id < entry_count);
if (in_bounds)
{
int unaffected_feature_map_id = unaffected_feature_map_list[unaffected_feature_map_index];
int offset = (entry_id * input_feature_map_count + unaffected_feature_map_id) * elem_count_per_feature_map + elem_id;
output[offset] = original_input[offset];
}
}
local_contrast_subtractive_2d_layer_updater_cuda::local_contrast_subtractive_2d_layer_updater_cuda()
{
}
local_contrast_subtractive_2d_layer_updater_cuda::~local_contrast_subtractive_2d_layer_updater_cuda()
{
}
void local_contrast_subtractive_2d_layer_updater_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_1st_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
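// Dispatch to a specialised, fully unrolled kernel for half-window sizes 1..10;
// larger windows fall back to the generic kernel with a runtime loop bound.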
switch(half_window_sizes[0])
{
case 1:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<1><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<2><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<3><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<4><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<5><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<6><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<7><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<8><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<9><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
local_contrast_subtractive_2d_blur_horizontal_exact_upd_kernel<10><<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(*input_buffers[0], *temporary_working_per_entry_buffer, *schema_data[0], *schema_data[1], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
local_contrast_subtractive_2d_blur_horizontal_upd_kernel<<<kernel_1st_dims.first, kernel_1st_dims.second, 0, stream_id>>>(
*input_buffers[0],
*temporary_working_per_entry_buffer,
*schema_data[0],
*schema_data[1],
output_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[0],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
std::pair<dim3, dim3> kernel_2nd_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
affected_feature_map_count * entry_count);
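// Same specialisation-by-half-window-size dispatch for the vertical pass.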
switch(half_window_sizes[1])
{
case 1:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<1><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 2:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<2><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 3:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<3><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 4:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<4><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 5:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<5><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 6:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<6><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 7:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<7><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 8:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<8><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 9:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<9><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
case 10:
local_contrast_subtractive_2d_blur_vertical_and_subtract_exact_upd_kernel<10><<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(*temporary_working_per_entry_buffer, *input_buffers[0], *output_buffer, *schema_data[0], *schema_data[2], output_configuration_specific.feature_map_count, affected_feature_map_count, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], entry_count);
break;
default:
local_contrast_subtractive_2d_blur_vertical_and_subtract_upd_kernel<<<kernel_2nd_dims.first, kernel_2nd_dims.second, 0, stream_id>>>(
*temporary_working_per_entry_buffer,
*input_buffers[0],
*output_buffer,
*schema_data[0],
*schema_data[2],
output_configuration_specific.feature_map_count,
affected_feature_map_count,
half_window_sizes[1],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
break;
}
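// Feature maps outside the affected list only need an explicit copy when the
// layer is not running in place (i.e. input and output buffers differ).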
if ((unaffected_feature_map_count > 0) && ((const float *)*input_buffers[0] != (const float *)*output_buffer))
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_elem_count_per_feature_map,
unaffected_feature_map_count,
entry_count);
local_contrast_subtractive_2d_copy_unaffected_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_buffers[0],
*output_buffer,
*schema_data[3],
output_configuration_specific.feature_map_count,
unaffected_feature_map_count,
output_elem_count_per_feature_map,
entry_count);
}
}
void local_contrast_subtractive_2d_layer_updater_cuda::updater_configured()
{
nnforge_shared_ptr<const local_contrast_subtractive_layer> layer_derived = nnforge_dynamic_pointer_cast<const local_contrast_subtractive_layer>(layer_schema);
affected_feature_map_count = static_cast<int>(layer_derived->feature_maps_affected.size());
unaffected_feature_map_count = static_cast<int>(layer_derived->feature_maps_unaffected.size());
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
half_window_sizes.push_back(static_cast<int>((*it + 1) >> 1));
central_mult = 1.0F - (2.0F * layer_derived->window_weights_list[0][0] * layer_derived->window_weights_list[1][0]);
}
size_t local_contrast_subtractive_2d_layer_updater_cuda::get_temporary_working_per_entry_buffer_size(const layer_action& action) const
{
return output_elem_count_per_feature_map * affected_feature_map_count * sizeof(float);
}
int local_contrast_subtractive_2d_layer_updater_cuda::get_input_index_layer_can_write(const layer_action& action) const
{
return 0;
}
}
}
|
ba98bfcc07cfaff63943b391889139d99b8eca88.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define N 2048
#define thread_num 512
#define block_num 4
__global__ void prescan(float *g_odata, float *g_idata, int n);
void scanCPU(float *f_out, float *f_in, int i_n);
int main()
{
float a[N], c[N], g[N];
float *dev_a, *dev_g;
int size = N * sizeof(float);
double d_gpuTime, d_cpuTime;
hipError_t error;
// initialize matrix a
for (int i = 0; i < N; i++)
{
a[i] = i + 1;
printf("a[%i] = %f\n", i, a[i]);
}
// allocate device buffers and copy the input array a to the GPU
error = hipMalloc((void **)&dev_a, size);
error = hipMalloc((void **)&dev_g, size);
error = hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
//execute kernel
prescan<<<block_num, thread_num, 2 * thread_num * sizeof(float)>>>(dev_g, dev_a, N);
hipDeviceSynchronize();
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipMemcpy(g, dev_g, size, hipMemcpyDeviceToHost);
clock_t cpu_startTime, cpu_endTime;
float cpu_ElapseTime = 0;
cpu_startTime = clock();
scanCPU(c, a, N);
cpu_endTime = clock();
cpu_ElapseTime = (double)(cpu_endTime - cpu_startTime) / CLOCKS_PER_SEC;
hipFree(dev_a); hipFree(dev_g);
for (int i = 0; i < N; i++)
{
printf("c[%i] = %0.3f, g[%i] = %0.3f\n", i, c[i], i, g[i]);
}
//printf("start= %.100f msec\nend= %.100f msec\n", (float)cpu_startTime, (float)cpu_endTime);
// Compute and print the gpu time
///printf("GPU Time= %.3f msec\nCPU Time= %.100f msec\n", msecTotal, cpu_ElapseTime);
//printf("CPU Time= %.100f msec\n", cpu_ElapseTime);
printf("GPU Time= %.3f msec\n", msecTotal);
// printf("GPU Time for scan size %i: %f\n", N, d_gpuTime);
// printf("CPU Time for scan size %i: %f\n", N, d_cpuTime);
system("PAUSE");
}
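// Work-efficient (Blelloch) exclusive prefix sum over one block's chunk of
// thread_num elements in shared memory: the up-sweep builds partial sums in
// place, the down-sweep converts them into an exclusive scan. Each block scans
// its chunk independently and no per-block offsets are added afterwards, so
// the device result matches the sequential scanCPU only within the first block.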
__global__ void prescan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[];
// allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int offset = 1;
if ((bid * thread_num + thid)<n) {
temp[thid] = g_idata[bid * thread_num + thid];
}
else {
temp[thid] = 0;
} // Make the "empty" spots zero so they won't affect the final result.
for (int d = thread_num >> 1; d > 0; d >>= 1)
// build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0)
{
temp[thread_num - 1] = 0;
}
// clear the last element
for (int d = 1; d < thread_num; d *= 2)
// traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid * thread_num + thid] = temp[thid];
}
void scanCPU(float *f_out, float *f_in, int i_n)
{
f_out[0] = 0;
for (int i = 1; i < i_n; i++)
f_out[i] = f_out[i - 1] + f_in[i - 1];
}
| ba98bfcc07cfaff63943b391889139d99b8eca88.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N 2048
#define thread_num 512
#define block_num 4
__global__ void prescan(float *g_odata, float *g_idata, int n);
void scanCPU(float *f_out, float *f_in, int i_n);
int main()
{
float a[N], c[N], g[N];
float *dev_a, *dev_g;
int size = N * sizeof(float);
double d_gpuTime, d_cpuTime;
cudaError_t error;
// initialize matrix a
for (int i = 0; i < N; i++)
{
a[i] = i + 1;
printf("a[%i] = %f\n", i, a[i]);
}
// allocate device buffers and copy the input array a to the GPU
error = cudaMalloc((void **)&dev_a, size);
error = cudaMalloc((void **)&dev_g, size);
error = cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
//execute kernel
prescan<<<block_num, thread_num, 2 * thread_num * sizeof(float)>>>(dev_g, dev_a, N);
cudaDeviceSynchronize();
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaMemcpy(g, dev_g, size, cudaMemcpyDeviceToHost);
clock_t cpu_startTime, cpu_endTime;
float cpu_ElapseTime = 0;
cpu_startTime = clock();
scanCPU(c, a, N);
cpu_endTime = clock();
cpu_ElapseTime = (double)(cpu_endTime - cpu_startTime) / CLOCKS_PER_SEC;
cudaFree(dev_a); cudaFree(dev_g);
for (int i = 0; i < N; i++)
{
printf("c[%i] = %0.3f, g[%i] = %0.3f\n", i, c[i], i, g[i]);
}
//printf("start= %.100f msec\nend= %.100f msec\n", (float)cpu_startTime, (float)cpu_endTime);
// Compute and print the gpu time
///printf("GPU Time= %.3f msec\nCPU Time= %.100f msec\n", msecTotal, cpu_ElapseTime);
//printf("CPU Time= %.100f msec\n", cpu_ElapseTime);
printf("GPU Time= %.3f msec\n", msecTotal);
// printf("GPU Time for scan size %i: %f\n", N, d_gpuTime);
// printf("CPU Time for scan size %i: %f\n", N, d_cpuTime);
system("PAUSE");
}
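// Work-efficient (Blelloch) exclusive prefix sum over one block's chunk of
// thread_num elements in shared memory: the up-sweep builds partial sums in
// place, the down-sweep converts them into an exclusive scan. Each block scans
// its chunk independently and no per-block offsets are added afterwards, so
// the device result matches the sequential scanCPU only within the first block.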
__global__ void prescan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[];
// allocated on invocation
int thid = threadIdx.x;
int bid = blockIdx.x;
int offset = 1;
if ((bid * thread_num + thid)<n) {
temp[thid] = g_idata[bid * thread_num + thid];
}
else {
temp[thid] = 0;
} // Make the "empty" spots zero so they won't affect the final result.
for (int d = thread_num >> 1; d > 0; d >>= 1)
// build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0)
{
temp[thread_num - 1] = 0;
}
// clear the last element
for (int d = 1; d < thread_num; d *= 2)
// traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[bid * thread_num + thid] = temp[thid];
}
void scanCPU(float *f_out, float *f_in, int i_n)
{
f_out[0] = 0;
for (int i = 1; i < i_n; i++)
f_out[i] = f_out[i - 1] + f_in[i - 1];
}
|
4c151c66cd4f1f272ee2321345fecf085f2b543a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define SIZE 1000
#define NUM_BIN 256
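// Privatised histogram: each block accumulates its own NUM_BIN-bin histogram
// in shared memory via atomics over a grid-stride loop, then thread i merges
// its block's bin i into the global histogram (assumes blockDim.x == NUM_BIN).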
__global__ void histogram_shared_memory(int *d_b, int *d_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int offset = blockDim.x * gridDim.x;
__shared__ int cache[256];
cache[threadIdx.x] = 0;
__syncthreads();
while (tid < SIZE)
{
atomicAdd(&(cache[d_a[tid]]), 1);
tid += offset;
}
__syncthreads();
atomicAdd(&(d_b[threadIdx.x]), cache[threadIdx.x]);
}
int main()
{
// generate the input array on the host
int h_a[SIZE];
for (int i = 0; i < SIZE; i++) {
//h_a[i] = bit_reverse(i, log2(SIZE));
h_a[i] = i % NUM_BIN;
}
int h_b[NUM_BIN];
for (int i = 0; i < NUM_BIN; i++) {
h_b[i] = 0;
}
// declare GPU memory pointers
int * d_a;
int * d_b;
// allocate GPU memory
hipMalloc((void **)&d_a, SIZE * sizeof(int));
hipMalloc((void **)&d_b, NUM_BIN * sizeof(int));
// transfer the arrays to the GPU
hipMemcpy(d_a, h_a, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, NUM_BIN * sizeof(int), hipMemcpyHostToDevice);
// launch the kernel
histogram_shared_memory<<<SIZE / 256, 256>>>(d_b, d_a);
// copy back the result from GPU
hipMemcpy(h_b, d_b, NUM_BIN * sizeof(int), hipMemcpyDeviceToHost);
printf("Histogram using 16 bin is: ");
for (int i = 0; i < NUM_BIN; i++) {
printf("bin %d: count %d\n", i, h_b[i]);
}
// free GPU memory allocation
hipFree(d_a);
hipFree(d_b);
return 0;
}
| 4c151c66cd4f1f272ee2321345fecf085f2b543a.cu | #include <stdio.h>
#include <cuda_runtime.h>
#define SIZE 1000
#define NUM_BIN 256
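// Privatised histogram: each block accumulates its own NUM_BIN-bin histogram
// in shared memory via atomics over a grid-stride loop, then thread i merges
// its block's bin i into the global histogram (assumes blockDim.x == NUM_BIN).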
__global__ void histogram_shared_memory(int *d_b, int *d_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int offset = blockDim.x * gridDim.x;
__shared__ int cache[256];
cache[threadIdx.x] = 0;
__syncthreads();
while (tid < SIZE)
{
atomicAdd(&(cache[d_a[tid]]), 1);
tid += offset;
}
__syncthreads();
atomicAdd(&(d_b[threadIdx.x]), cache[threadIdx.x]);
}
int main()
{
// generate the input array on the host
int h_a[SIZE];
for (int i = 0; i < SIZE; i++) {
//h_a[i] = bit_reverse(i, log2(SIZE));
h_a[i] = i % NUM_BIN;
}
int h_b[NUM_BIN];
for (int i = 0; i < NUM_BIN; i++) {
h_b[i] = 0;
}
// declare GPU memory pointers
int * d_a;
int * d_b;
// allocate GPU memory
cudaMalloc((void **)&d_a, SIZE * sizeof(int));
cudaMalloc((void **)&d_b, NUM_BIN * sizeof(int));
// transfer the arrays to the GPU
cudaMemcpy(d_a, h_a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, NUM_BIN * sizeof(int), cudaMemcpyHostToDevice);
// launch the kernel
histogram_shared_memory<<<SIZE / 256, 256>>>(d_b, d_a);
// copy back the result from GPU
cudaMemcpy(h_b, d_b, NUM_BIN * sizeof(int), cudaMemcpyDeviceToHost);
printf("Histogram using 16 bin is: ");
for (int i = 0; i < NUM_BIN; i++) {
printf("bin %d: count %d\n", i, h_b[i]);
}
// free GPU memory allocation
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
|
6b1d1473de574276bbdb9fa62f96d8b955f1a6e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Forward_cpu(bottom, top);
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// TODO(Yangqing): implement the GPU version of softmax.
Backward_cpu(top, propagate_down, bottom);
}
#if 0
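// Disabled reference GPU path: the forward kernel computes the per-position
// loss -log(max(p[label], FLT_MIN)) together with a count mask for optional
// normalization; the backward kernel subtracts 1 from the predicted
// probability of the true class, the usual softmax-cross-entropy gradient.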
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
CHECK_LE(ignore_label_.size(), 1);
const bool has_ignore_label = !ignore_label_.empty();
const int ignore_label = (has_ignore_label) ? *ignore_label_.begin() : 0;
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label, ignore_label, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
CHECK_LE(ignore_label_.size(), 1);
const bool has_ignore_label = !ignore_label_.empty();
const int ignore_label = (has_ignore_label) ? *ignore_label_.begin() : 0;
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label, ignore_label, counts);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
#endif
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
| 6b1d1473de574276bbdb9fa62f96d8b955f1a6e0.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Forward_cpu(bottom, top);
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// TODO(Yangqing): implement the GPU version of softmax.
Backward_cpu(top, propagate_down, bottom);
}
#if 0
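// Disabled reference GPU path: the forward kernel computes the per-position
// loss -log(max(p[label], FLT_MIN)) together with a count mask for optional
// normalization; the backward kernel subtracts 1 from the predicted
// probability of the true class, the usual softmax-cross-entropy gradient.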
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
CHECK_LE(ignore_label_.size(), 1);
const bool has_ignore_label = !ignore_label_.empty();
const int ignore_label = (has_ignore_label) ? *ignore_label_.begin() : 0;
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label, ignore_label, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
loss /= count;
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
CHECK_LE(ignore_label_.size(), 1);
const bool has_ignore_label = !ignore_label_.empty();
const int ignore_label = (has_ignore_label) ? *ignore_label_.begin() : 0;
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label, ignore_label, counts);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
#endif
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
20d723aa7fca6cbb604298c7034c8f3f857ff424.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/adamax_impl.cuh"
#include "include/hip/hip_fp16.h"
template <typename T>
__device__ __forceinline__ T SqrtFunc(T input) {
return sqrt(input);
}
template <>
__device__ __forceinline__ half SqrtFunc(half input) {
return hsqrt(input);
}
template <typename T>
__device__ __forceinline__ T MaxFunc(T input1, T input2) {
return (input1 > input2) ? input1 : input2;
}
template <typename T>
__device__ __forceinline__ T AbsFunc(T input) {
const T zero = static_cast<T>(0);
return (input >= zero) ? input : -input;
}
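// AdaMax update (infinity-norm variant of Adam):
//   m <- b1*m + (1 - b1)*g,   v <- max(b2*v, |g|),
//   var <- var - lr * m / ((1 - b1_power) * (v + eps)),
// where b1_power carries the running beta1^t bias-correction term. The
// specialisations below only differ in their mixed-precision conversions.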
template <typename T, typename S, typename G>
__global__ void ApplyAdamaxKernal(const size_t size, const S *b1_power, const S *learning_rate, const S *b1,
const S *b2, const S *eps, const G *gradient, T *variable, T *m, T *v) {
const S one = static_cast<S>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = b1[0] * m[i] + (one - b1[0]) * gradient[i];
v[i] = MaxFunc(b2[0] * v[i], AbsFunc(gradient[i]));
variable[i] -= learning_rate[0] * m[i] / (one - b1_power[0]) / (v[i] + eps[0]);
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const half *gradient, half *variable, half *m,
half *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __float2half(b1[0]) * m[i] + __float2half(one - b1[0]) * gradient[i];
v[i] = MaxFunc(__float2half(b2[0]) * v[i], AbsFunc(gradient[i]));
variable[i] -=
__float2half(learning_rate[0]) * m[i] / __float2half(one - b1_power[0]) / (v[i] + __float2half(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const half *gradient, float *variable, float *m,
float *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = b1[0] * m[i] + (one - b1[0]) * __half2float(gradient[i]);
v[i] = MaxFunc(b2[0] * v[i], AbsFunc(__half2float(gradient[i])));
variable[i] -= learning_rate[0] * m[i] / (one - b1_power[0]) / (v[i] + eps[0]);
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const half *b1_power, const half *learning_rate, const half *b1,
const half *b2, const half *eps, const float *gradient, float *variable, float *m,
float *v) {
const half one = static_cast<half>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __half2float(b1[0]) * m[i] + __half2float(one - b1[0]) * gradient[i];
v[i] = MaxFunc(__half2float(b2[0]) * v[i], AbsFunc(gradient[i]));
variable[i] -=
__half2float(learning_rate[0]) * m[i] / __half2float(one - b1_power[0]) / (v[i] + __half2float(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const half *b1_power, const half *learning_rate, const half *b1,
const half *b2, const half *eps, const double *gradient, double *variable, double *m,
double *v) {
const half one = static_cast<half>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __half2float(b1[0]) * m[i] + __half2float(one - b1[0]) * gradient[i];
v[i] = MaxFunc(__half2float(b2[0]) * v[i], AbsFunc(gradient[i]));
variable[i] -=
__half2float(learning_rate[0]) * m[i] / __half2float(one - b1_power[0]) / (v[i] + __half2float(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const double *gradient, double *variable,
double *m, double *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = b1[0] * m[i] + (one - b1[0]) * gradient[i];
v[i] = MaxFunc(b2[0] * v[i], AbsFunc(gradient[i]));
variable[i] -= learning_rate[0] * m[i] / (one - b1_power[0]) / (v[i] + eps[0]);
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const float *gradient, half *variable, half *m,
half *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __float2half(b1[0]) * m[i] + __float2half(one - b1[0]) * __float2half(gradient[i]);
v[i] = MaxFunc(__float2half(b2[0]) * v[i], AbsFunc(__float2half(gradient[i])));
variable[i] -=
__float2half(learning_rate[0]) * m[i] / __float2half(one - b1_power[0]) / (v[i] + __float2half(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const float *gradient, double *variable, double *m,
double *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = b1[0] * m[i] + (one - b1[0]) * gradient[i];
v[i] = MaxFunc(b2[0] * v[i], static_cast<double>(AbsFunc(gradient[i])));
variable[i] -= learning_rate[0] * m[i] / (one - b1_power[0]) / (v[i] + eps[0]);
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const half *b1_power, const half *learning_rate, const half *b1,
const half *b2, const half *eps, const half *gradient, double *variable, double *m,
double *v) {
const half one = static_cast<half>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __half2float(b1[0]) * m[i] + __half2float(one - b1[0]) * __half2float(gradient[i]);
v[i] = MaxFunc(__half2float(b2[0]) * v[i], static_cast<double>(AbsFunc(__half2float(gradient[i]))));
variable[i] -=
__half2float(learning_rate[0]) * m[i] / __half2float(one - b1_power[0]) / (v[i] + __half2float(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const half *b1_power, const half *learning_rate, const half *b1,
const half *b2, const half *eps, const half *gradient, float *variable, float *m,
float *v) {
const half one = static_cast<half>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __half2float(b1[0]) * m[i] + __half2float(one - b1[0]) * __half2float(gradient[i]);
v[i] = MaxFunc(__half2float(b2[0]) * v[i], AbsFunc(__half2float(gradient[i])));
variable[i] -=
__half2float(learning_rate[0]) * m[i] / __half2float(one - b1_power[0]) / (v[i] + __half2float(eps[0]));
}
}
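// Host wrapper: launches the update kernel on the given stream with grid/block
// sizes derived from the device id and reports the launch status.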
template <typename T, typename S, typename G>
hipError_t ApplyAdamax(const size_t size, const S *b1_power, const S *learning_rate, const S *b1, const S *b2,
const S *eps, const G *gradient, T *variable, T *m, T *v, const uint32_t &device_id,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ApplyAdamaxKernal), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
size, b1_power, learning_rate, b1, b2, eps, gradient, variable, m, v);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<double, double, double>(
const size_t size, const double *b1_power, const double *learning_rate, const double *b1, const double *b2,
const double *eps, const double *gradient, double *variable, double *m, double *v, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<float, float, float>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const float *gradient, float *variable, float *m,
float *v, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<half, half, half>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const half *gradient, half *variable, half *m, half *v, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<half, float, half>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const half *gradient, half *variable, half *m,
half *v, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<float, float, half>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const half *gradient, float *variable, float *m,
float *v, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<float, half, float>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const float *gradient, float *variable, float *m, float *v, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<half, float, float>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const float *gradient, half *variable, half *m,
half *v, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<float, half, half>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const half *gradient, float *variable, float *m, float *v, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<double, half, half>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const half *gradient, double *variable, double *m, double *v, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<double, half, double>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const double *gradient, double *variable, double *m, double *v, const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<double, float, double>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const double *gradient, double *variable,
double *m, double *v, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t ApplyAdamax<double, float, float>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const float *gradient, double *variable,
double *m, double *v, const uint32_t &device_id,
hipStream_t cuda_stream);
| 20d723aa7fca6cbb604298c7034c8f3f857ff424.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/adamax_impl.cuh"
#include "include/cuda_fp16.h"
template <typename T>
__device__ __forceinline__ T SqrtFunc(T input) {
return sqrt(input);
}
template <>
__device__ __forceinline__ half SqrtFunc(half input) {
return hsqrt(input);
}
template <typename T>
__device__ __forceinline__ T MaxFunc(T input1, T input2) {
return (input1 > input2) ? input1 : input2;
}
template <typename T>
__device__ __forceinline__ T AbsFunc(T input) {
const T zero = static_cast<T>(0);
return (input >= zero) ? input : -input;
}
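// AdaMax update, applied elementwise: m = b1*m + (1 - b1)*grad; v = max(b2*v, |grad|);
// variable -= learning_rate * m / ((1 - b1_power) * (v + eps)).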
template <typename T, typename S, typename G>
__global__ void ApplyAdamaxKernal(const size_t size, const S *b1_power, const S *learning_rate, const S *b1,
const S *b2, const S *eps, const G *gradient, T *variable, T *m, T *v) {
const S one = static_cast<S>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = b1[0] * m[i] + (one - b1[0]) * gradient[i];
v[i] = MaxFunc(b2[0] * v[i], AbsFunc(gradient[i]));
variable[i] -= learning_rate[0] * m[i] / (one - b1_power[0]) / (v[i] + eps[0]);
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const half *gradient, half *variable, half *m,
half *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __float2half(b1[0]) * m[i] + __float2half(one - b1[0]) * gradient[i];
v[i] = MaxFunc(__float2half(b2[0]) * v[i], AbsFunc(gradient[i]));
variable[i] -=
__float2half(learning_rate[0]) * m[i] / __float2half(one - b1_power[0]) / (v[i] + __float2half(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const half *gradient, float *variable, float *m,
float *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = b1[0] * m[i] + (one - b1[0]) * __half2float(gradient[i]);
v[i] = MaxFunc(b2[0] * v[i], AbsFunc(__half2float(gradient[i])));
variable[i] -= learning_rate[0] * m[i] / (one - b1_power[0]) / (v[i] + eps[0]);
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const half *b1_power, const half *learning_rate, const half *b1,
const half *b2, const half *eps, const float *gradient, float *variable, float *m,
float *v) {
const half one = static_cast<half>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __half2float(b1[0]) * m[i] + __half2float(one - b1[0]) * gradient[i];
v[i] = MaxFunc(__half2float(b2[0]) * v[i], AbsFunc(gradient[i]));
variable[i] -=
__half2float(learning_rate[0]) * m[i] / __half2float(one - b1_power[0]) / (v[i] + __half2float(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const half *b1_power, const half *learning_rate, const half *b1,
const half *b2, const half *eps, const double *gradient, double *variable, double *m,
double *v) {
const half one = static_cast<half>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __half2float(b1[0]) * m[i] + __half2float(one - b1[0]) * gradient[i];
v[i] = MaxFunc(__half2float(b2[0]) * v[i], AbsFunc(gradient[i]));
variable[i] -=
__half2float(learning_rate[0]) * m[i] / __half2float(one - b1_power[0]) / (v[i] + __half2float(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const double *gradient, double *variable,
double *m, double *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = b1[0] * m[i] + (one - b1[0]) * gradient[i];
v[i] = MaxFunc(b2[0] * v[i], AbsFunc(gradient[i]));
variable[i] -= learning_rate[0] * m[i] / (one - b1_power[0]) / (v[i] + eps[0]);
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const float *gradient, half *variable, half *m,
half *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __float2half(b1[0]) * m[i] + __float2half(one - b1[0]) * __float2half(gradient[i]);
v[i] = MaxFunc(__float2half(b2[0]) * v[i], AbsFunc(__float2half(gradient[i])));
variable[i] -=
__float2half(learning_rate[0]) * m[i] / __float2half(one - b1_power[0]) / (v[i] + __float2half(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const float *b1_power, const float *learning_rate, const float *b1,
const float *b2, const float *eps, const float *gradient, double *variable, double *m,
double *v) {
const float one = static_cast<float>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = b1[0] * m[i] + (one - b1[0]) * gradient[i];
v[i] = MaxFunc(b2[0] * v[i], static_cast<double>(AbsFunc(gradient[i])));
variable[i] -= learning_rate[0] * m[i] / (one - b1_power[0]) / (v[i] + eps[0]);
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const half *b1_power, const half *learning_rate, const half *b1,
const half *b2, const half *eps, const half *gradient, double *variable, double *m,
double *v) {
const half one = static_cast<half>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __half2float(b1[0]) * m[i] + __half2float(one - b1[0]) * __half2float(gradient[i]);
v[i] = MaxFunc(__half2float(b2[0]) * v[i], static_cast<double>(AbsFunc(__half2float(gradient[i]))));
variable[i] -=
__half2float(learning_rate[0]) * m[i] / __half2float(one - b1_power[0]) / (v[i] + __half2float(eps[0]));
}
}
template <>
__global__ void ApplyAdamaxKernal(const size_t size, const half *b1_power, const half *learning_rate, const half *b1,
const half *b2, const half *eps, const half *gradient, float *variable, float *m,
float *v) {
const half one = static_cast<half>(1.0);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] = __half2float(b1[0]) * m[i] + __half2float(one - b1[0]) * __half2float(gradient[i]);
v[i] = MaxFunc(__half2float(b2[0]) * v[i], AbsFunc(__half2float(gradient[i])));
variable[i] -=
__half2float(learning_rate[0]) * m[i] / __half2float(one - b1_power[0]) / (v[i] + __half2float(eps[0]));
}
}
template <typename T, typename S, typename G>
cudaError_t ApplyAdamax(const size_t size, const S *b1_power, const S *learning_rate, const S *b1, const S *b2,
const S *eps, const G *gradient, T *variable, T *m, T *v, const uint32_t &device_id,
cudaStream_t cuda_stream) {
ApplyAdamaxKernal<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(
size, b1_power, learning_rate, b1, b2, eps, gradient, variable, m, v);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<double, double, double>(
const size_t size, const double *b1_power, const double *learning_rate, const double *b1, const double *b2,
const double *eps, const double *gradient, double *variable, double *m, double *v, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<float, float, float>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const float *gradient, float *variable, float *m,
float *v, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<half, half, half>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const half *gradient, half *variable, half *m, half *v, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<half, float, half>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const half *gradient, half *variable, half *m,
half *v, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<float, float, half>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const half *gradient, float *variable, float *m,
float *v, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<float, half, float>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const float *gradient, float *variable, float *m, float *v, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<half, float, float>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const float *gradient, half *variable, half *m,
half *v, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<float, half, half>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const half *gradient, float *variable, float *m, float *v, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<double, half, half>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const half *gradient, double *variable, double *m, double *v, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<double, half, double>(
const size_t size, const half *b1_power, const half *learning_rate, const half *b1, const half *b2, const half *eps,
const double *gradient, double *variable, double *m, double *v, const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<double, float, double>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const double *gradient, double *variable,
double *m, double *v, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t ApplyAdamax<double, float, float>(const size_t size, const float *b1_power,
const float *learning_rate, const float *b1,
const float *b2, const float *eps,
const float *gradient, double *variable,
double *m, double *v, const uint32_t &device_id,
cudaStream_t cuda_stream);
|
eafd67b2429a8505134852ce57accecad7f62cb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/layers/softmax_layer.hpp"
namespace caffe {
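// Channel-wise softmax helpers: kernel_channel_{max,sum,dot} reduce over channels for each
// (n, spatial) position, while kernel_channel_subtract, kernel_exp and kernel_channel_div
// operate elementwise over the whole blob.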
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Mtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Mtype maxval = -max_dtype<Mtype>();
for (int c = 0; c < channels; ++c) {
maxval = max((Mtype)data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Mtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= (Mtype)channel_max[n * spatial_dim + s];
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_exp(const int count, const Dtype* data, Mtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp((Mtype)data[index]);
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Mtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Mtype sum = 0.F;
for (int c = 0; c < channels; ++c) {
sum += (Mtype)data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Mtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
Dtype cs = channel_sum[n * spatial_dim + s];
data[index] /= (Mtype)(cs > min_dtype<Dtype>() || cs < - min_dtype<Dtype>() ?
cs : min_dtype<Dtype>());
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Mtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Mtype dot = 0.F;
for (int c = 0; c < channels; ++c) {
dot += (Mtype)data_1[(n * channels + c) * spatial_dim + s]
* (Mtype)data_2[(n * channels + c) * spatial_dim + s];
}
channel_dot[index] = dot;
}
}
template <typename Ftype, typename Btype>
void SoftmaxLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
Ftype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
hipStream_t stream = Caffe::thread_stream();
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, count, outer_num_, channels, inner_num_,
scale_data, top_data);
CUDA_CHECK(hipStreamSynchronize(stream));
}
template <typename Ftype, typename Btype>
void SoftmaxLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
const Btype* top_diff = top[0]->gpu_diff<Btype>();
const Btype* top_data = top[0]->gpu_data<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
Ftype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
hipStream_t stream = Caffe::thread_stream();
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
count, outer_num_, channels, inner_num_, scale_data, bottom_diff);
CUDA_CHECK(hipStreamSynchronize(stream));
// elementwise multiplication
caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(SoftmaxLayer);
} // namespace caffe
| eafd67b2429a8505134852ce57accecad7f62cb5.cu | #include <algorithm>
#include <device_launch_parameters.h>
#include "caffe/layers/softmax_layer.hpp"
namespace caffe {
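// Channel-wise softmax helpers: kernel_channel_{max,sum,dot} reduce over channels for each
// (n, spatial) position, while kernel_channel_subtract, kernel_exp and kernel_channel_div
// operate elementwise over the whole blob.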
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Mtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Mtype maxval = -max_dtype<Mtype>();
for (int c = 0; c < channels; ++c) {
maxval = max((Mtype)data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Mtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= (Mtype)channel_max[n * spatial_dim + s];
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_exp(const int count, const Dtype* data, Mtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp((Mtype)data[index]);
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Mtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Mtype sum = 0.F;
for (int c = 0; c < channels; ++c) {
sum += (Mtype)data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Mtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
Dtype cs = channel_sum[n * spatial_dim + s];
data[index] /= (Mtype)(cs > min_dtype<Dtype>() || cs < - min_dtype<Dtype>() ?
cs : min_dtype<Dtype>());
}
}
template <typename Dtype, typename Mtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Mtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Mtype dot = 0.F;
for (int c = 0; c < channels; ++c) {
dot += (Mtype)data_1[(n * channels + c) * spatial_dim + s]
* (Mtype)data_2[(n * channels + c) * spatial_dim + s];
}
channel_dot[index] = dot;
}
}
template <typename Ftype, typename Btype>
void SoftmaxLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
Ftype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
cudaStream_t stream = Caffe::thread_stream();
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template <typename Ftype, typename Btype>
void SoftmaxLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
const Btype* top_diff = top[0]->gpu_diff<Btype>();
const Btype* top_data = top[0]->gpu_data<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
Ftype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
cudaStream_t stream = Caffe::thread_stream();
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS, 0, stream>>>(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
count, outer_num_, channels, inner_num_, scale_data, bottom_diff);
CUDA_CHECK(cudaStreamSynchronize(stream));
// elementwise multiplication
caffe_gpu_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(SoftmaxLayer);
} // namespace caffe
|
7efe9b8db14593917b82ca2e09db6243e66f1399.hip | // !!! This is a file automatically generated by hipify!!!
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
// includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
__global__ void increment_kernel(int *g_data, int inc_value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + inc_value;
}
bool correct_output(int *data, const int n, const int x)
{
for (int i = 0; i < n; i++)
if (data[i] != x)
{
printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
return false;
}
return true;
}
int main(int argc, char *argv[])
{
int devID;
hipDeviceProp_t deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char **)argv);
// get device name
checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
const int N = 1024 * 1024;
const int N_BYTES = N * sizeof(int);
const int VALUE = 26;
// allocate host memory
int* host_arr;
checkCudaErrors(hipHostMalloc((void **)&host_arr, N_BYTES));
memset(host_arr, 0, N_BYTES);
// allocate device memory
int* device_arr;
checkCudaErrors(hipMalloc((void **)&device_arr, N_BYTES));
checkCudaErrors(hipMemset(device_arr, 255, N_BYTES));
// create cuda event handles
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(hipDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
hipEventRecord(start, 0);
hipMemcpyAsync(device_arr, host_arr, N_BYTES, hipMemcpyHostToDevice, 0);
// set kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(N / threads.x, 1);
hipLaunchKernelGGL(( increment_kernel), dim3(blocks), dim3(threads), 0, 0, device_arr, VALUE);
hipMemcpyAsync(host_arr, device_arr, N_BYTES, hipMemcpyDeviceToHost, 0);
hipEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for the asynchronous GPU work in stream 0 to finish
unsigned long int counter=0;
while (hipEventQuery(stop) == hipErrorNotReady)
{
counter++;
}
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
bool bFinalResults = correct_output(host_arr, N, VALUE);
// release resources
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipHostFree(host_arr));
checkCudaErrors(hipFree(device_arr));
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
} | 7efe9b8db14593917b82ca2e09db6243e66f1399.cu | //
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
// includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
__global__ void increment_kernel(int *g_data, int inc_value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + inc_value;
}
bool correct_output(int *data, const int n, const int x)
{
for (int i = 0; i < n; i++)
if (data[i] != x)
{
printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
return false;
}
return true;
}
int main(int argc, char *argv[])
{
int devID;
cudaDeviceProp deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char **)argv);
// get device name
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
const int N = 1024 * 1024;
const int N_BYTES = N * sizeof(int);
const int VALUE = 26;
// allocate host memory
int* host_arr;
checkCudaErrors(cudaMallocHost((void **)&host_arr, N_BYTES));
memset(host_arr, 0, N_BYTES);
// allocate device memory
int* device_arr;
checkCudaErrors(cudaMalloc((void **)&device_arr, N_BYTES));
checkCudaErrors(cudaMemset(device_arr, 255, N_BYTES));
// create cuda event handles
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(cudaDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
cudaMemcpyAsync(device_arr, host_arr, N_BYTES, cudaMemcpyHostToDevice, 0);
// set kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(N / threads.x, 1);
increment_kernel<<<blocks, threads, 0, 0>>>(device_arr, VALUE);
cudaMemcpyAsync(host_arr, device_arr, N_BYTES, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for the asynchronous GPU work in stream 0 to finish
unsigned long int counter=0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
bool bFinalResults = correct_output(host_arr, N, VALUE);
// release resources
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaFreeHost(host_arr));
checkCudaErrors(cudaFree(device_arr));
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
} |
58ce57e06469afb19a90aeb6c351c9180ba4936a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <stdio.h>
#include <math.h>
#include <chrono>
struct Source{
double x;
double y;
double z;
};
// Since sm35 is the targeted platform and does not have a native float64 atomicAdd,
// we need a custom atomicAdd implemented as a compare-and-swap loop on the 64-bit representation.
__device__ double atomicAdd_sm35(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
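// Each thread adds the 1/r potential of one point source (evaluated at the query point) to a
// per-block partial sum; thread 0 of every block then folds its block total into the global sum.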
__global__ void
potential_reduce(
struct Source query_point,
struct Source *sources,
const int N,
double *partialSum,
double *sum
){
if(threadIdx.x==0){
partialSum[blockIdx.x]=0;
}
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
double threadSum =1;
if(i<N){
// Compute point source contribution
double r = sqrt(
pow((sources[i].x-query_point.x),2)
+pow((sources[i].y-query_point.y),2)
+pow((sources[i].z-query_point.z),2)
);
threadSum = 1.0/r;
// Block Sum
atomicAdd_sm35(&partialSum[blockIdx.x],threadSum);
}
// Every thread in the block (not only those with i < N) must reach this barrier
// before thread 0 reads the completed per-block partial sum.
__syncthreads();
if(threadIdx.x==0){
// Global Sum;
atomicAdd_sm35(&sum[0],partialSum[blockIdx.x]);
}
}
int main(int argc, char **argv)
{
auto start = std::chrono::system_clock::now();
int N = 31200;
struct Source *sources;
hipMallocManaged(&sources,N * sizeof(struct Source));
// Create a 10m x 2m x 2m box with 31200 point sources on the surface
int count = 0;
for(int i=-100;i<100;i++){
for(int j=-19;j<19;j++){
double x=i*0.05+0.025;
double y=-1.0;
double z=j*0.05+0.025;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
y=1.0;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
}
}
for(int i=-100;i<100;i++){
for(int j=-20;j<20;j++){
double x=i*0.05+0.025;
double y=j*0.05+0.025;
double z=-1.0;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
z=1.0;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
}
}
int blockSize = 256;
int numBlocks = (N+blockSize -1)/blockSize;
double *partialSum;
double *sum;
hipMallocManaged(&partialSum,numBlocks*sizeof(double));
hipMallocManaged(&sum,sizeof(double));
struct Source query_point;
query_point.x = -2.0;
query_point.y = 0;
query_point.z = 0;
// auto start = std::chrono::system_clock::now();
for(int i=0;i<10;i++){
sum[0]=0;
hipLaunchKernelGGL(( potential_reduce), dim3(numBlocks),dim3(blockSize), 0, 0, query_point,sources,N,partialSum,sum);
hipDeviceSynchronize();
std::cout
<< "---" << std::endl
<< query_point.x << std::endl
<< query_point.y << std::endl
<< query_point.z << std::endl
<< "---" << std::endl
<< sum[0]
<< std::endl;
query_point.x+=0.5;
}
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
std::cout << "elapsed time: " << elapsed_seconds.count() << "s\n";
} | 58ce57e06469afb19a90aeb6c351c9180ba4936a.cu | #include <iostream>
#include <chrono>
#include <stdio.h>
#include <math.h>
#include <chrono>
struct Source{
double x;
double y;
double z;
};
// Since sm35 is the targeted platform and does not have a native float64 atomicAdd,
// we need a custom atomicAdd implemented as a compare-and-swap loop on the 64-bit representation.
__device__ double atomicAdd_sm35(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
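// Each thread adds the 1/r potential of one point source (evaluated at the query point) to a
// per-block partial sum; thread 0 of every block then folds its block total into the global sum.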
__global__ void
potential_reduce(
struct Source query_point,
struct Source *sources,
const int N,
double *partialSum,
double *sum
){
if(threadIdx.x==0){
partialSum[blockIdx.x]=0;
}
__syncthreads();
int i = blockIdx.x * blockDim.x + threadIdx.x;
double threadSum =1;
if(i<N){
// Compute point source contribution
double r = sqrt(
pow((sources[i].x-query_point.x),2)
+pow((sources[i].y-query_point.y),2)
+pow((sources[i].z-query_point.z),2)
);
threadSum = 1.0/r;
// Block Sum
atomicAdd_sm35(&partialSum[blockIdx.x],threadSum);
}
// Every thread in the block (not only those with i < N) must reach this barrier
// before thread 0 reads the completed per-block partial sum.
__syncthreads();
if(threadIdx.x==0){
// Global Sum;
atomicAdd_sm35(&sum[0],partialSum[blockIdx.x]);
}
}
int main(int argc, char **argv)
{
auto start = std::chrono::system_clock::now();
int N = 31200;
struct Source *sources;
cudaMallocManaged(&sources,N * sizeof(struct Source));
// Create a 10m x 2m x 2m box with 31200 point sources on the surface
int count = 0;
for(int i=-100;i<100;i++){
for(int j=-19;j<19;j++){
double x=i*0.05+0.025;
double y=-1.0;
double z=j*0.05+0.025;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
y=1.0;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
}
}
for(int i=-100;i<100;i++){
for(int j=-20;j<20;j++){
double x=i*0.05+0.025;
double y=j*0.05+0.025;
double z=-1.0;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
z=1.0;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
}
}
int blockSize = 256;
int numBlocks = (N+blockSize -1)/blockSize;
double *partialSum;
double *sum;
cudaMallocManaged(&partialSum,numBlocks*sizeof(double));
cudaMallocManaged(&sum,sizeof(double));
struct Source query_point;
query_point.x = -2.0;
query_point.y = 0;
query_point.z = 0;
// auto start = std::chrono::system_clock::now();
for(int i=0;i<10;i++){
sum[0]=0;
potential_reduce<<<numBlocks,blockSize>>>(query_point,sources,N,partialSum,sum);
cudaDeviceSynchronize();
std::cout
<< "---" << std::endl
<< query_point.x << std::endl
<< query_point.y << std::endl
<< query_point.z << std::endl
<< "---" << std::endl
<< sum[0]
<< std::endl;
query_point.x+=0.5;
}
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
std::cout << "elapsed time: " << elapsed_seconds.count() << "s\n";
} |
cc69f5fa7358a9c01500590aa4f5ef87bba593b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/channel_shuffle_layer.hpp"
namespace caffe {
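// Pattern-0 shuffle: ChannelShuffleForward moves the element at (channel i, spatial position j)
// of each sample to transposed offset j*channels + i; ChannelShuffleBackward applies the inverse
// mapping to the gradients.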
template <typename Dtype>
__global__ void ChannelShuffleForward(const int n, const Dtype* in, Dtype* out,
const int bottom_dim, const int feature_dim, const int channels) {
CUDA_KERNEL_LOOP(index, n) {
const int n = index / bottom_dim;
const int i = (index - n*bottom_dim) / feature_dim;
const int j = index - n*bottom_dim - i*feature_dim;
const int new_index = n*bottom_dim + j*channels + i;
out[new_index] = in[index];
}
}
template <typename Dtype>
void ChannelShuffleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (shuffle_pattern == 0) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ChannelShuffleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, bottom_dim_, feature_dim_, channels_);
CUDA_POST_KERNEL_CHECK;
} else {
// TODO(Shuan) : parallelize Forward and Backward as pattern 0
for (int n = 0; n < num_; n++) {
for (int g = 0; g < group_; g++) {
for (int c = 0; c < group_chnl_num_; c+=shf_chnl_num_) {
int group_index = (c/shf_chnl_num_)%group_;
int feature_index = (c/shf_chnl_num_)/group_;
caffe_copy(
shf_chnl_num_*feature_dim_,
bottom_data+n*bottom_dim_+g*group_dim_+c*feature_dim_,
top_data+n*bottom_dim_+group_index*group_dim_+feature_index*feature_dim_*shf_chnl_num_
);
}
}
}
}
}
template <typename Dtype>
__global__ void ChannelShuffleBackward(const int n, const Dtype* in, Dtype* out,
const int bottom_dim, const int feature_dim, const int channels) {
CUDA_KERNEL_LOOP(index, n) {
const int n = index / bottom_dim;
const int j = (index - n*bottom_dim) / channels;
const int i = index - n*bottom_dim - j*channels;
const int new_index = n*bottom_dim + j + i*feature_dim;
out[new_index] = in[index];
}
}
template <typename Dtype>
void ChannelShuffleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
if (shuffle_pattern == 0) {
hipLaunchKernelGGL(( ChannelShuffleBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_diff, bottom_dim_, feature_dim_, channels_);
CUDA_POST_KERNEL_CHECK;
} else {
// TODO(Shuan) : parallelize Forward and Backward as pattern 0
for (int n = 0; n < num_; n++) {
for (int g = 0; g < group_; g++) {
for (int c = 0; c < group_chnl_num_; c+=shf_chnl_num_) {
int group_index = (c/shf_chnl_num_)%group_;
int feature_index = (c/shf_chnl_num_)/group_;
caffe_copy(
shf_chnl_num_*feature_dim_,
top_diff+n*bottom_dim_+group_index*group_dim_+feature_index*feature_dim_*shf_chnl_num_,
bottom_diff+n*bottom_dim_+g*group_dim_+c*feature_dim_
);
}
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ChannelShuffleLayer);
} // namespace caffe
| cc69f5fa7358a9c01500590aa4f5ef87bba593b3.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/channel_shuffle_layer.hpp"
namespace caffe {
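// Pattern-0 shuffle: ChannelShuffleForward moves the element at (channel i, spatial position j)
// of each sample to transposed offset j*channels + i; ChannelShuffleBackward applies the inverse
// mapping to the gradients.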
template <typename Dtype>
__global__ void ChannelShuffleForward(const int n, const Dtype* in, Dtype* out,
const int bottom_dim, const int feature_dim, const int channels) {
CUDA_KERNEL_LOOP(index, n) {
const int n = index / bottom_dim;
const int i = (index - n*bottom_dim) / feature_dim;
const int j = index - n*bottom_dim - i*feature_dim;
const int new_index = n*bottom_dim + j*channels + i;
out[new_index] = in[index];
}
}
template <typename Dtype>
void ChannelShuffleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (shuffle_pattern == 0) {
// NOLINT_NEXT_LINE(whitespace/operators)
ChannelShuffleForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, bottom_dim_, feature_dim_, channels_);
CUDA_POST_KERNEL_CHECK;
} else {
// TODO(Shuan) : parallelize Forward and Backward as pattern 0
for (int n = 0; n < num_; n++) {
for (int g = 0; g < group_; g++) {
for (int c = 0; c < group_chnl_num_; c+=shf_chnl_num_) {
int group_index = (c/shf_chnl_num_)%group_;
int feature_index = (c/shf_chnl_num_)/group_;
caffe_copy(
shf_chnl_num_*feature_dim_,
bottom_data+n*bottom_dim_+g*group_dim_+c*feature_dim_,
top_data+n*bottom_dim_+group_index*group_dim_+feature_index*feature_dim_*shf_chnl_num_
);
}
}
}
}
}
template <typename Dtype>
__global__ void ChannelShuffleBackward(const int n, const Dtype* in, Dtype* out,
const int bottom_dim, const int feature_dim, const int channels) {
CUDA_KERNEL_LOOP(index, n) {
const int n = index / bottom_dim;
const int j = (index - n*bottom_dim) / channels;
const int i = index - n*bottom_dim - j*channels;
const int new_index = n*bottom_dim + j + i*feature_dim;
out[new_index] = in[index];
}
}
template <typename Dtype>
void ChannelShuffleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
if (shuffle_pattern == 0) {
ChannelShuffleBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_diff, bottom_dim_, feature_dim_, channels_);
CUDA_POST_KERNEL_CHECK;
} else {
// TODO(Shuan) : parallelize Forward and Backward as pattern 0
for (int n = 0; n < num_; n++) {
for (int g = 0; g < group_; g++) {
for (int c = 0; c < group_chnl_num_; c+=shf_chnl_num_) {
int group_index = (c/shf_chnl_num_)%group_;
int feature_index = (c/shf_chnl_num_)/group_;
caffe_copy(
shf_chnl_num_*feature_dim_,
top_diff+n*bottom_dim_+group_index*group_dim_+feature_index*feature_dim_*shf_chnl_num_,
bottom_diff+n*bottom_dim_+g*group_dim_+c*feature_dim_
);
}
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ChannelShuffleLayer);
} // namespace caffe
|
65a48f8c6e6c36e48433b92c47474732f7cd9385.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaKernels.cuh"
#include "stdafx.h"
#include "SequentialSvm.h"
///<summary>Gaussian (RBF) kernel: exp(-g * ||a[aI] - b[bI]||^2) between row aI of a and row bI of b.</summary>
__host__ __device__ double gaussKernel(const double* a, int aI, const double* b, const int bI, const int width, const double g)
{
int aIw = aI * width;
int bIw = bI * width;
double product=1;
double sum = 0;
for (int j = 0; j < width; ++j)
{
product = a[aIw + j] - b[bIw + j];
product *= product;
sum += product;
}
return exp(-g*sum);
}
__host__ __device__ double calcAlpha(double alpha, const double sum, const double y, double step, const double C)
{
auto newAlpha = alpha + step - step * y * sum;
if (newAlpha > C)
newAlpha = C;
else if (newAlpha < 0)
newAlpha = 0.0;
return newAlpha;
}
__host__ __device__ void updateStep(double& step, double& oldDif, double newDif)
{
if (oldDif*newDif<0 || abs(oldDif) <= abs(newDif))
step /= 2;
oldDif = newDif;
}
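// One decision-function term per training sample: saida[idx] = tY[idx] * alpha[idx] * K(training sample idx, validation sample index).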
__global__ void classificationKernel(double *saida, const double *tX, const double *tY, const double *vX, const double *alpha, const double g, const int index, const int width, const int max)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > max) return;
saida[idx] = tY[idx] * alpha[idx] * gaussKernel(tX, idx, vX, index, width, g);
}
__global__ void trainingKernelLoop(double *sum, const double *alpha, const double *x, const double *y, const double g, const int width, const int height, const int batchStart, const int batchEnd)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > height) return;
double outerSum = sum[idx];
for (int i = batchStart; i < batchEnd; i++){
outerSum += alpha[i] * y[i] * gaussKernel(x, idx, x, i, width, g);
}
sum[idx] = outerSum;
}
__global__ void trainingKernelFinishMultiple(double *alpha, double *sum, const double *y, const int nSamples, double *step, double *last, const double C)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > nSamples) return;
double newAlpha = calcAlpha(alpha[idx], sum[idx], y[idx], step[idx], C);
sum[idx] = 0.0;
auto dif = newAlpha - alpha[idx];
updateStep(step[idx], last[idx], dif);
alpha[idx] = newAlpha;
}
__global__ void trainingKernelFinishSingle(double *alpha, double *sum, const double *y, const int nSamples, double step, double *last, const double C)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > nSamples) return;
double newAlpha = calcAlpha(alpha[idx], sum[idx], y[idx], step, C);
sum[idx] = 0.0;
auto dif = newAlpha - alpha[idx];
last[idx] = dif;
alpha[idx] = newAlpha;
}
__global__ void initArray(double *array, const double value, const int max)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < max)
array[idx] = value;
} | 65a48f8c6e6c36e48433b92c47474732f7cd9385.cu | #include "CudaKernels.cuh"
#include "stdafx.h"
#include "SequentialSvm.h"
///<summary>Gaussian (RBF) kernel: exp(-g * ||a[aI] - b[bI]||^2) between row aI of a and row bI of b.</summary>
__host__ __device__ double gaussKernel(const double* a, int aI, const double* b, const int bI, const int width, const double g)
{
int aIw = aI * width;
int bIw = bI * width;
double product=1;
double sum = 0;
for (int j = 0; j < width; ++j)
{
product = a[aIw + j] - b[bIw + j];
product *= product;
sum += product;
}
return exp(-g*sum);
}
__host__ __device__ double calcAlpha(double alpha, const double sum, const double y, double step, const double C)
{
auto newAlpha = alpha + step - step * y * sum;
if (newAlpha > C)
newAlpha = C;
else if (newAlpha < 0)
newAlpha = 0.0;
return newAlpha;
}
__host__ __device__ void updateStep(double& step, double& oldDif, double newDif)
{
if (oldDif*newDif<0 || abs(oldDif) <= abs(newDif))
step /= 2;
oldDif = newDif;
}
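// One decision-function term per training sample: saida[idx] = tY[idx] * alpha[idx] * K(training sample idx, validation sample index).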
__global__ void classificationKernel(double *saida, const double *tX, const double *tY, const double *vX, const double *alpha, const double g, const int index, const int width, const int max)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > max) return;
saida[idx] = tY[idx] * alpha[idx] * gaussKernel(tX, idx, vX, index, width, g);
}
__global__ void trainingKernelLoop(double *sum, const double *alpha, const double *x, const double *y, const double g, const int width, const int height, const int batchStart, const int batchEnd)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > height) return;
double outerSum = sum[idx];
for (int i = batchStart; i < batchEnd; i++){
outerSum += alpha[i] * y[i] * gaussKernel(x, idx, x, i, width, g);
}
sum[idx] = outerSum;
}
__global__ void trainingKernelFinishMultiple(double *alpha, double *sum, const double *y, const int nSamples, double *step, double *last, const double C)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > nSamples) return;
double newAlpha = calcAlpha(alpha[idx], sum[idx], y[idx], step[idx], C);
sum[idx] = 0.0;
auto dif = newAlpha - alpha[idx];
updateStep(step[idx], last[idx], dif);
alpha[idx] = newAlpha;
}
__global__ void trainingKernelFinishSingle(double *alpha, double *sum, const double *y, const int nSamples, double step, double *last, const double C)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > nSamples) return;
double newAlpha = calcAlpha(alpha[idx], sum[idx], y[idx], step, C);
sum[idx] = 0.0;
auto dif = newAlpha - alpha[idx];
last[idx] = dif;
alpha[idx] = newAlpha;
}
__global__ void initArray(double *array, const double value, const int max)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < max)
array[idx] = value;
} |
86eaf1422591527bebc33c9d342240d9de4bbd58.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
cuda functions for gpu processing
*******************************************************************************/
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/imgproc.hpp>
#include <hip/hip_runtime_api.h>
#include <hipfft.h>
#include "video_param.h"
/******************************************************************************/
// __device__ void cufftReal_convert(cv::Mat * d_mat, hipfftReal * d_raw) {
//
// d_raw[blockIdx.x] = (hipfftReal) d_mat->at<double>(blockIdx.x);
// }
// d_ftd is taken by reference so the transformed-data buffer allocated below is visible to the caller.
bool fft_batched(hipfftReal * d_raw, video_param_t video_param, hipfftComplex *& d_ftd) {
// create plan for performing fft
hipfftHandle plan;
size_t batch = video_param.height * video_param.width;
size_t n_points = video_param.n_frames;
if (hipfftPlan1d(&plan, n_points, HIPFFT_R2C, batch) != HIPFFT_SUCCESS) {
printf("Failed to create 1D plan\n");
return -1;
}
// allocate return data
hipMalloc((void**) &d_ftd, sizeof(hipfftComplex)*n_points * batch);
if (hipGetLastError() != hipSuccess) {
printf("Failed to allocate memory space for transformed data.\n");
return -1;
}
// perform fft
if (hipfftExecR2C(plan, d_raw, d_ftd) != HIPFFT_SUCCESS) {
printf("Failed to perform fft.\n");
return -1;
}
hipfftDestroy(plan);
hipFree(d_raw);
return 0;
}
/******************************************************************************/
| 86eaf1422591527bebc33c9d342240d9de4bbd58.cu | /*******************************************************************************
cuda functions for gpu processing
*******************************************************************************/
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/imgproc.hpp>
#include <cuda_runtime_api.h>
#include <cufft.h>
#include "video_param.h"
/******************************************************************************/
// __device__ void cufftReal_convert(cv::Mat * d_mat, cufftReal * d_raw) {
//
// d_raw[blockIdx.x] = (cufftReal) d_mat->at<double>(blockIdx.x);
// }
// d_ftd is taken by reference so the transformed-data buffer allocated below is visible to the caller.
bool fft_batched(cufftReal * d_raw, video_param_t video_param, cufftComplex *& d_ftd) {
// create plan for performing fft
cufftHandle plan;
size_t batch = video_param.height * video_param.width;
size_t n_points = video_param.n_frames;
if (cufftPlan1d(&plan, n_points, CUFFT_R2C, batch) != CUFFT_SUCCESS) {
printf("Failed to create 1D plan\n");
return -1;
}
// allocate return data
cudaMalloc((void**) &d_ftd, sizeof(cufftComplex)*n_points * batch);
if (cudaGetLastError() != cudaSuccess) {
printf("Failed to allocate memory space for transformed data.\n");
return -1;
}
// perform fft
if (cufftExecR2C(plan, d_raw, d_ftd) != CUFFT_SUCCESS) {
printf("Failed to perform fft.\n");
return -1;
}
cufftDestroy(plan);
cudaFree(d_raw);
return 0;
}
/******************************************************************************/
|
ff472948eabd544d70e27d68534bb4fe69d192ef.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_multiply.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int ARows = 1;
int ACols = 1;
int BRows = 1;
int BCols = 1;
int CRows = 1;
int CCols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((gpu_multiply), dim3(gridBlock), dim3(threadBlock), 0, 0, A,B,C,ARows,ACols,BRows,BCols,CRows,CCols);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((gpu_multiply), dim3(gridBlock), dim3(threadBlock), 0, 0, A,B,C,ARows,ACols,BRows,BCols,CRows,CCols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((gpu_multiply), dim3(gridBlock), dim3(threadBlock), 0, 0, A,B,C,ARows,ACols,BRows,BCols,CRows,CCols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ff472948eabd544d70e27d68534bb4fe69d192ef.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_multiply.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int ARows = 1;
int ACols = 1;
int BRows = 1;
int BCols = 1;
int CRows = 1;
int CCols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_multiply<<<gridBlock,threadBlock>>>(A,B,C,ARows,ACols,BRows,BCols,CRows,CCols);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_multiply<<<gridBlock,threadBlock>>>(A,B,C,ARows,ACols,BRows,BCols,CRows,CCols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_multiply<<<gridBlock,threadBlock>>>(A,B,C,ARows,ACols,BRows,BCols,CRows,CCols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f52a6dfcd41056cfbd24b522e10cdd409fee7d8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void quickSort(int *x, int *dfirst, int *dlast, int *list)
{
int idx = threadIdx.x;
int first = dfirst[idx];
int last = dlast[idx];
list[idx] = 0;
if(first<last)
{
int pivot, j, temp, i;
pivot = first;
i = first;
j = last;
while(i<j)
{
while(x[i]<=x[pivot] && i<last)
i++;
while(x[j] > x[pivot])
j--;
if(i<j)
{
temp = x[i];
x[i] = x[j];
x[j] = temp;
}
}
temp = x[pivot];
x[pivot] = x[j];
x[j] = temp;
for(i=first; i<last; i++)   // compare only adjacent pairs inside the segment; x[last+1] may be out of bounds
if(x[i] > x[i+1])
{
list[idx] = j+1;
break;
}
}
} | f52a6dfcd41056cfbd24b522e10cdd409fee7d8f.cu | #include "includes.h"
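// One partition step per thread: thread idx partitions x[dfirst[idx]..dlast[idx]] around its first
// element and stores the split position j+1 in list[idx], leaving 0 when the segment is already sorted.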
__global__ void quickSort(int *x, int *dfirst, int *dlast, int *list)
{
int idx = threadIdx.x;
int first = dfirst[idx];
int last = dlast[idx];
list[idx] = 0;
if(first<last)
{
int pivot, j, temp, i;
pivot = first;
i = first;
j = last;
while(i<j)
{
while(x[i]<=x[pivot] && i<last)
i++;
while(x[j] > x[pivot])
j--;
if(i<j)
{
temp = x[i];
x[i] = x[j];
x[j] = temp;
}
}
temp = x[pivot];
x[pivot] = x[j];
x[j] = temp;
for(i=first; i<last; i++)   // compare only adjacent pairs inside the segment; x[last+1] may be out of bounds
if(x[i] > x[i+1])
{
list[idx] = j+1;
break;
}
}
} |
27909e18976722a42c0ff108f663762fd954dbed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
{
}
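// Gradient of a scaled squared-error loss with respect to x: accumulates
// pref * gradc[0] * (x[i] - y[i]) into gradn, chaining the upstream gradient gradc[0].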
__global__ void DmeanSquareLoss(const int lengthx, const double pref, const double *gradc, const double *x,const double *y, double *gradn )
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthx)
{
gradn[i] += pref * gradc[0] * (x[i]-y[i]);
}
} | 27909e18976722a42c0ff108f663762fd954dbed.cu | #include "includes.h"
extern "C"
{
}
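// Gradient of a scaled squared-error loss with respect to x: accumulates
// pref * gradc[0] * (x[i] - y[i]) into gradn, chaining the upstream gradient gradc[0].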
__global__ void DmeanSquareLoss(const int lengthx, const double pref, const double *gradc, const double *x,const double *y, double *gradn )
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthx)
{
gradn[i] += pref * gradc[0] * (x[i]-y[i]);
}
} |
34d414e9a7573e61830112b000fbe39b5286268e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kGamma.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
hipMalloc(&mat, XSIZE*YSIZE*sizeof(float));
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE*sizeof(float));
unsigned int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
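// hipFree(0) below is a common idiom to force HIP context/runtime initialization up front, so that setup cost is not charged to the first kernel launch.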
hipFree(0);
hipLaunchKernelGGL((kGamma), dim3(gridBlock), dim3(threadBlock), 0, 0, mat, target, len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kGamma), dim3(gridBlock), dim3(threadBlock), 0, 0, mat, target, len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kGamma), dim3(gridBlock), dim3(threadBlock), 0, 0, mat, target, len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 34d414e9a7573e61830112b000fbe39b5286268e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kGamma.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
cudaMalloc(&mat, XSIZE*YSIZE*sizeof(float));
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE*sizeof(float));
unsigned int len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
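// cudaFree(0) below is a common idiom to force CUDA context/runtime initialization up front, so that setup cost is not charged to the first kernel launch.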
cudaFree(0);
kGamma<<<gridBlock,threadBlock>>>(mat,target,len);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kGamma<<<gridBlock,threadBlock>>>(mat,target,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kGamma<<<gridBlock,threadBlock>>>(mat,target,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
58ef02c874f000d6f4544d8c7618f7c4fb849bc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#define NUM_ELEM 8
#define NUM_THREADS 10
using namespace std;
__global__ void concurrentRW(int *data) {
// NUM_THREADS threads try to read and write at the same location
//data[blockIdx.x] = data[blockIdx.x] + threadIdx.x;
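// The plain read-modify-write above (commented out) is a data race and would lose updates;
// atomicAdd below serializes the NUM_THREADS additions, so each block ends up with
// 0 + 1 + ... + (NUM_THREADS-1) = NUM_THREADS*(NUM_THREADS-1)/2, the value main() verifies.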
atomicAdd(&data[blockIdx.x], threadIdx.x);
}
int main(int argc, char *argv[]) {
int* data = NULL;
bool errorsDetected = false;
hipMallocManaged(&data, NUM_ELEM * sizeof(unsigned long long int));
if (data == 0) {
cout << "[HOST] Couldn't allocate memory\n";
return 1;
}
// init all elements to 0
hipMemset(data, 0, NUM_ELEM * sizeof(int));
// launch kernel writes
hipLaunchKernelGGL(( concurrentRW), dim3(NUM_ELEM), dim3(NUM_THREADS), 0, 0, data);
hipDeviceSynchronize();
if (hipSuccess != hipGetLastError()) {
return 1;
}
for(int i = 0; i < NUM_ELEM; i++) {
cout << i << ". " << data[i] << endl;
if(data[i] != (NUM_THREADS * (NUM_THREADS - 1) / 2)) {
errorsDetected = true;
}
}
if(errorsDetected) {
cout << "Errors detected" << endl;
} else {
cout << "OK" << endl;
}
return 0;
} | 58ef02c874f000d6f4544d8c7618f7c4fb849bc4.cu | #include <iostream>
#define NUM_ELEM 8
#define NUM_THREADS 10
using namespace std;
__global__ void concurrentRW(int *data) {
// NUM_THREADS threads try to read and write at the same location
//data[blockIdx.x] = data[blockIdx.x] + threadIdx.x;
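// The plain read-modify-write above (commented out) is a data race and would lose updates;
// atomicAdd below serializes the NUM_THREADS additions, so each block ends up with
// 0 + 1 + ... + (NUM_THREADS-1) = NUM_THREADS*(NUM_THREADS-1)/2, the value main() verifies.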
atomicAdd(&data[blockIdx.x], threadIdx.x);
}
int main(int argc, char *argv[]) {
int* data = NULL;
bool errorsDetected = false;
cudaMallocManaged(&data, NUM_ELEM * sizeof(unsigned long long int));
if (data == 0) {
cout << "[HOST] Couldn't allocate memory\n";
return 1;
}
// init all elements to 0
cudaMemset(data, 0, NUM_ELEM * sizeof(int));
// launch kernel writes
concurrentRW<<<NUM_ELEM, NUM_THREADS>>>(data);
cudaDeviceSynchronize();
if (cudaSuccess != cudaGetLastError()) {
return 1;
}
for(int i = 0; i < NUM_ELEM; i++) {
cout << i << ". " << data[i] << endl;
if(data[i] != (NUM_THREADS * (NUM_THREADS - 1) / 2)) {
errorsDetected = true;
}
}
if(errorsDetected) {
cout << "Errors detected" << endl;
} else {
cout << "OK" << endl;
}
return 0;
} |
d8fda02e1af1aa9ce47df55a70c8c35642311f57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "csv_common.hpp"
#include "csv_gpu.hpp"
#include <io/utilities/block_utils.cuh>
#include <io/utilities/parsing_utils.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/strings/detail/convert/fixed_point.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <io/utilities/trie.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/detail/copy.h>
#include <thrust/remove.h>
#include <thrust/transform.h>
#include <type_traits>
using namespace ::cudf::io;
using cudf::device_span;
using cudf::detail::grid_1d;
namespace cudf {
namespace io {
namespace csv {
namespace gpu {
/// Block dimension for dtype detection and conversion kernels
constexpr uint32_t csvparse_block_dim = 128;
/*
* @brief Returns true if the input character is a valid digit.
* Supports both decimal and hexadecimal digits (uppercase and lowercase).
*
* @param c Character to check
* @param is_hex Whether to check as a hexadecimal
*
* @return `true` if it is digit-like, `false` otherwise
*/
__device__ __inline__ bool is_digit(char c, bool is_hex = false)
{
if (c >= '0' && c <= '9') return true;
if (is_hex) {
if (c >= 'A' && c <= 'F') return true;
if (c >= 'a' && c <= 'f') return true;
}
return false;
}
/*
* @brief Checks whether the given character counters indicate a potentially
* valid date and/or time field.
*
* For performance and simplicity, we detect only the most common date
* formats. Example formats that are detectable:
*
* `2001/02/30`
* `2001-02-30 00:00:00`
* `2/30/2001 T04:05:60.7`
* `2 / 1 / 2011`
* `02/January`
*
* @param len Number of non special-symbol or numeric characters
* @param decimal_count Number of '.' characters
* @param colon_count Number of ':' characters
* @param dash_count Number of '-' characters
* @param slash_count Number of '/' characters
*
* @return `true` if it is date-like, `false` otherwise
*/
__device__ __inline__ bool is_datetime(
long len, long decimal_count, long colon_count, long dash_count, long slash_count)
{
// Must not exceed count of longest month (September) plus `T` time indicator
if (len > 10) { return false; }
// Must not have more than one decimal point or more than two time separators
if (decimal_count > 1 || colon_count > 2) { return false; }
// Must have one or two '-' or '/' but not both as date separators
if ((dash_count > 0 && dash_count < 3 && slash_count == 0) ||
(dash_count == 0 && slash_count > 0 && slash_count < 3)) {
return true;
}
return false;
}
/*
* @brief Returns true if the counters indicate a potentially valid float.
* False positives are possible because positions are not taken into account.
* For example, field "e.123-" would match the pattern.
*
* @param len Number of non special-symbol or numeric characters
* @param digit_count Number of digits characters
* @param decimal_count Number of occurrences of the decimal point character
* @param thousands_count Number of occurrences of the thousands separator character
* @param dash_count Number of '-' characters
* @param exponent_count Number of 'e or E' characters
*
* @return `true` if it is floating point-like, `false` otherwise
*/
__device__ __inline__ bool is_floatingpoint(long len,
long digit_count,
long decimal_count,
long thousands_count,
long dash_count,
long exponent_count)
{
// Can't have more than one exponent and one decimal point
if (decimal_count > 1) return false;
if (exponent_count > 1) return false;
// Without the exponent or a decimal point, this is an integer, not a float
if (decimal_count == 0 && exponent_count == 0) return false;
// Can only have one '-' per component
if (dash_count > 1 + exponent_count) return false;
// If anything other than these characters is present, it's not a float
if (digit_count + decimal_count + dash_count + exponent_count + thousands_count != len) {
return false;
}
// Needs at least 1 digit, 2 if exponent is present
if (digit_count < 1 + exponent_count) return false;
return true;
}
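// Worked examples of the counting heuristic above:
// "1.5e-3" -> 3 digits, 1 decimal point, 1 dash, 1 exponent, len 6 -> float-like (true)
// "1,234" (with ',' configured as the thousands separator) -> no decimal point or exponent -> integer (false)
// "e.123-" -> the counts add up, so it is accepted even though it is not a valid float (see note above)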
/*
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed one row/record at a time, so the number of total
* threads (tid) is equal to the number of rows.
*
* @param opts A set of parsing options
* @param csv_text The entire CSV data to read
* @param column_flags Per-column parsing behavior flags
* @param row_offsets The start of the CSV data of interest
* @param d_column_data The count for each column data type
*/
__global__ void __launch_bounds__(csvparse_block_dim)
data_type_detection(parse_options_view const opts,
device_span<char const> csv_text,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_offsets,
device_span<column_type_histogram> d_column_data)
{
auto const raw_csv = csv_text.data();
// ThreadIds range per block, so also need the blockId
// This is entry into the fields; threadId is an element within `num_records`
auto const rec_id = grid_1d::global_thread_id();
auto const rec_id_next = rec_id + 1;
// we can have more threads than data, make sure we are not past the end of the data
if (rec_id_next >= row_offsets.size()) { return; }
auto field_start = raw_csv + row_offsets[rec_id];
auto const row_end = raw_csv + row_offsets[rec_id_next];
auto next_field = field_start;
int col = 0;
int actual_col = 0;
// Going through all the columns of a given record
while (col < column_flags.size() && field_start < row_end) {
auto next_delimiter = cudf::io::gpu::seek_field_end(field_start, row_end, opts);
// Checking if this is a column that the user wants --- user can filter columns
if (column_flags[col] & column_parse::inferred) {
// points to last character in the field
auto const field_len = static_cast<size_t>(next_delimiter - field_start);
if (serialized_trie_contains(opts.trie_na, {field_start, field_len})) {
atomicAdd(&d_column_data[actual_col].null_count, 1);
} else if (serialized_trie_contains(opts.trie_true, {field_start, field_len}) ||
serialized_trie_contains(opts.trie_false, {field_start, field_len})) {
atomicAdd(&d_column_data[actual_col].bool_count, 1);
} else if (cudf::io::is_infinity(field_start, next_delimiter)) {
atomicAdd(&d_column_data[actual_col].float_count, 1);
} else {
long count_number = 0;
long count_decimal = 0;
long count_thousands = 0;
long count_slash = 0;
long count_dash = 0;
long count_plus = 0;
long count_colon = 0;
long count_string = 0;
long count_exponent = 0;
// Modify field_start & end to ignore whitespace and quotechars
// This could possibly result in additional empty fields
auto const trimmed_field_range = trim_whitespaces_quotes(field_start, next_delimiter);
auto const trimmed_field_len = trimmed_field_range.second - trimmed_field_range.first;
for (auto cur = trimmed_field_range.first; cur < trimmed_field_range.second; ++cur) {
if (is_digit(*cur)) {
count_number++;
continue;
}
if (*cur == opts.decimal) {
count_decimal++;
continue;
}
if (*cur == opts.thousands) {
count_thousands++;
continue;
}
// Looking for unique characters that will help identify column types.
switch (*cur) {
case '-': count_dash++; break;
case '+': count_plus++; break;
case '/': count_slash++; break;
case ':': count_colon++; break;
case 'e':
case 'E':
if (cur > trimmed_field_range.first && cur < trimmed_field_range.second - 1)
count_exponent++;
break;
default: count_string++; break;
}
}
// Integers have to have the length of the string
// Off by one if they start with a minus sign
auto const int_req_number_cnt =
trimmed_field_len - count_thousands -
((*trimmed_field_range.first == '-' || *trimmed_field_range.first == '+') &&
trimmed_field_len > 1);
if (column_flags[col] & column_parse::as_datetime) {
// PANDAS uses `object` dtype if the date is unparseable
if (is_datetime(count_string, count_decimal, count_colon, count_dash, count_slash)) {
atomicAdd(&d_column_data[actual_col].datetime_count, 1);
} else {
atomicAdd(&d_column_data[actual_col].string_count, 1);
}
} else if (count_number == int_req_number_cnt) {
auto const is_negative = (*trimmed_field_range.first == '-');
auto const data_begin =
trimmed_field_range.first + (is_negative || (*trimmed_field_range.first == '+'));
cudf::size_type* ptr = cudf::io::gpu::infer_integral_field_counter(
data_begin, data_begin + count_number, is_negative, d_column_data[actual_col]);
atomicAdd(ptr, 1);
} else if (is_floatingpoint(trimmed_field_len,
count_number,
count_decimal,
count_thousands,
count_dash + count_plus,
count_exponent)) {
atomicAdd(&d_column_data[actual_col].float_count, 1);
} else {
atomicAdd(&d_column_data[actual_col].string_count, 1);
}
}
actual_col++;
}
next_field = next_delimiter + 1;
field_start = next_field;
col++;
}
}
/**
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] options A set of parsing options
* @param[in] data The entire CSV data to read
* @param[in] column_flags Per-column parsing behavior flags
* @param[in] row_offsets The start of the CSV data of interest
* @param[in] dtypes The data type of the column
* @param[out] columns The output column data
* @param[out] valids The bitmaps indicating whether column fields are valid
* @param[out] valid_counts The number of valid fields in each column
*/
__global__ void __launch_bounds__(csvparse_block_dim)
convert_csv_to_cudf(cudf::io::parse_options_view options,
device_span<char const> data,
device_span<column_parse::flags const> column_flags,
device_span<uint64_t const> row_offsets,
device_span<cudf::data_type const> dtypes,
device_span<void* const> columns,
device_span<cudf::bitmask_type* const> valids,
device_span<size_type> valid_counts)
{
auto const raw_csv = data.data();
// thread IDs range per block, so also need the block id.
// this is entry into the field array - tid is an element within the num_entries array
auto const rec_id = grid_1d::global_thread_id();
auto const rec_id_next = rec_id + 1;
// we can have more threads than data, make sure we are not past the end of the data
if (rec_id_next >= row_offsets.size()) return;
auto field_start = raw_csv + row_offsets[rec_id];
auto const row_end = raw_csv + row_offsets[rec_id_next];
auto next_field = field_start;
int col = 0;
int actual_col = 0;
while (col < column_flags.size() && field_start < row_end) {
auto next_delimiter = cudf::io::gpu::seek_field_end(next_field, row_end, options);
if (column_flags[col] & column_parse::enabled) {
// check if the entire field is a NaN string - consistent with pandas
auto const is_valid = !serialized_trie_contains(
options.trie_na, {field_start, static_cast<size_t>(next_delimiter - field_start)});
// Modify field_start & end to ignore whitespace and quotechars
auto field_end = next_delimiter;
if (is_valid && dtypes[actual_col].id() != cudf::type_id::STRING) {
auto const trimmed_field =
trim_whitespaces_quotes(field_start, field_end, options.quotechar);
field_start = trimmed_field.first;
field_end = trimmed_field.second;
}
if (is_valid) {
// Type dispatcher does not handle STRING
if (dtypes[actual_col].id() == cudf::type_id::STRING) {
auto end = next_delimiter;
if (not options.keepquotes) {
if ((*field_start == options.quotechar) && (*(end - 1) == options.quotechar)) {
++field_start;
--end;
}
}
auto str_list = static_cast<std::pair<char const*, size_t>*>(columns[actual_col]);
str_list[rec_id].first = field_start;
str_list[rec_id].second = end - field_start;
} else {
if (cudf::type_dispatcher(dtypes[actual_col],
ConvertFunctor{},
field_start,
field_end,
columns[actual_col],
rec_id,
dtypes[actual_col],
options,
column_flags[col] & column_parse::as_hexadecimal)) {
// set the valid bitmap - all bits were set to 0 to start
set_bit(valids[actual_col], rec_id);
atomicAdd(&valid_counts[actual_col], 1);
}
}
} else if (dtypes[actual_col].id() == cudf::type_id::STRING) {
auto str_list = static_cast<std::pair<char const*, size_t>*>(columns[actual_col]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
++actual_col;
}
next_field = next_delimiter + 1;
field_start = next_field;
++col;
}
}
/*
* @brief Merge two packed row contexts (each corresponding to a block of characters)
* and return the packed row context corresponding to the merged character block
*/
inline __device__ packed_rowctx_t merge_row_contexts(packed_rowctx_t first_ctx,
packed_rowctx_t second_ctx)
{
uint32_t id0 = get_row_context(first_ctx, ROW_CTX_NONE) & 3;
uint32_t id1 = get_row_context(first_ctx, ROW_CTX_QUOTE) & 3;
uint32_t id2 = get_row_context(first_ctx, ROW_CTX_COMMENT) & 3;
return (first_ctx & ~pack_row_contexts(3, 3, 3)) +
pack_row_contexts(get_row_context(second_ctx, id0),
get_row_context(second_ctx, id1),
get_row_context(second_ctx, id2));
}
/*
* @brief Per-character context:
* 1-bit count (0 or 1) per context in the lower 4 bits
* 2-bit output context id per input context in bits 8..15
*/
constexpr __device__ uint32_t make_char_context(uint32_t id0,
uint32_t id1,
uint32_t id2 = ROW_CTX_COMMENT,
uint32_t c0 = 0,
uint32_t c1 = 0,
uint32_t c2 = 0)
{
return (id0 << 8) | (id1 << 10) | (id2 << 12) | (ROW_CTX_EOF << 14) | (c0) | (c1 << 1) |
(c2 << 2);
}
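// Example: the "previous character was the line terminator, current character is ordinary"
// case below uses make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1):
// the low bits mark a new row when the block entered this character in the NONE or COMMENT
// context (but not inside a quoted field), and bits 8..13 hold the 2-bit context the parser
// moves to for each of the three possible input contexts.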
/*
* @brief Merge a 1-character context to keep track of bitmasks where new rows occur
* Merges a single-character "block" row context at position pos with the current
* block's row context (the current block contains 32-pos characters)
*
* @param ctx Current block context and new rows bitmaps
* @param char_ctx state transitions associated with new character
* @param pos Position within the current 32-character block
*
* NOTE: This is probably the most performance-critical piece of the row gathering kernel.
* The char_ctx value should be created via make_char_context, and its value should
* have been evaluated at compile-time.
*/
inline __device__ void merge_char_context(uint4& ctx, uint32_t char_ctx, uint32_t pos)
{
uint32_t id0 = (ctx.w >> 0) & 3;
uint32_t id1 = (ctx.w >> 2) & 3;
uint32_t id2 = (ctx.w >> 4) & 3;
// Set the newrow bit in the bitmap at the corresponding position
ctx.x |= ((char_ctx >> id0) & 1) << pos;
ctx.y |= ((char_ctx >> id1) & 1) << pos;
ctx.z |= ((char_ctx >> id2) & 1) << pos;
// Update the output context ids
ctx.w = ((char_ctx >> (8 + id0 * 2)) & 0x03) | ((char_ctx >> (6 + id1 * 2)) & 0x0c) |
((char_ctx >> (4 + id2 * 2)) & 0x30) | (ROW_CTX_EOF << 6);
}
/*
* Convert the context-with-row-bitmaps version to a packed row context
*/
inline __device__ packed_rowctx_t pack_rowmaps(uint4 ctx_map)
{
return pack_row_contexts(make_row_context(__popc(ctx_map.x), (ctx_map.w >> 0) & 3),
make_row_context(__popc(ctx_map.y), (ctx_map.w >> 2) & 3),
make_row_context(__popc(ctx_map.z), (ctx_map.w >> 4) & 3));
}
/*
* Selects the row bitmap corresponding to the given parser state
*/
inline __device__ uint32_t select_rowmap(uint4 ctx_map, uint32_t ctxid)
{
return (ctxid == ROW_CTX_NONE) ? ctx_map.x
: (ctxid == ROW_CTX_QUOTE) ? ctx_map.y
: (ctxid == ROW_CTX_COMMENT) ? ctx_map.z
: 0;
}
/**
* @brief Single pair-wise 512-wide row context merge transform
*
* Merge row context blocks and record the merge operation in a context
* tree so that the transform is reversible.
* The tree is organized such that the left and right children of node n
* are located at indices n*2 and n*2+1, the root node starting at index 1
*
* @tparam lanemask mask to specify source of packed row context
* @tparam tmask mask to specify principal thread for merging row context
* @tparam base start location for writing into packed row context tree
* @tparam level_scale level of the node in the tree
* @param[out] ctxtree packed row context tree
* @param[in] ctxb packed row context for the current character block
* @param t thread id (leaf node id)
*/
template <uint32_t lanemask, uint32_t tmask, uint32_t base, uint32_t level_scale>
inline __device__ void ctx_merge(uint64_t* ctxtree, packed_rowctx_t* ctxb, uint32_t t)
{
uint64_t tmp = shuffle_xor(*ctxb, lanemask);
if (!(t & tmask)) {
*ctxb = merge_row_contexts(*ctxb, tmp);
ctxtree[base + (t >> level_scale)] = *ctxb;
}
}
/**
* @brief Single 512-wide row context inverse merge transform
*
* Walks the context tree starting from a root node
*
* @tparam rmask Mask to specify which threads write input row context
* @param[in] base Start read location of the merge transform tree
* @param[in] ctxtree Merge transform tree
* @param[in] ctx Input context
* @param[in] brow4 output row in block *4
* @param[in] t thread id (leaf node id)
*/
template <uint32_t rmask>
inline __device__ void ctx_unmerge(
uint32_t base, uint64_t* ctxtree, uint32_t* ctx, uint32_t* brow4, uint32_t t)
{
rowctx32_t ctxb_left, ctxb_right, ctxb_sum;
ctxb_sum = get_row_context(ctxtree[base], *ctx);
ctxb_left = get_row_context(ctxtree[(base)*2 + 0], *ctx);
ctxb_right = get_row_context(ctxtree[(base)*2 + 1], ctxb_left & 3);
if (t & (rmask)) {
*brow4 += (ctxb_sum & ~3) - (ctxb_right & ~3);
*ctx = ctxb_left & 3;
}
}
/*
* @brief 512-wide row context merge transform
*
* Repeatedly merge row context blocks, keeping track of each merge operation
* in a context tree so that the transform is reversible
* The tree is organized such that the left and right children of node n
* are located at indices n*2 and n*2+1, the root node starting at index 1
*
* Each node contains the counts and output contexts corresponding to the
* possible input contexts.
* Each parent node's count is obtained by adding the corresponding counts
* from the left child node with the right child node's count selected from
* the left child node's output context:
* parent.count[k] = left.count[k] + right.count[left.outctx[k]]
* parent.outctx[k] = right.outctx[left.outctx[k]]
*
* @param[out] ctxtree packed row context tree
* @param[in] ctxb packed row context for the current character block
* @param t thread id (leaf node id)
*/
static inline __device__ void rowctx_merge_transform(uint64_t ctxtree[1024],
packed_rowctx_t ctxb,
uint32_t t)
{
ctxtree[512 + t] = ctxb;
ctx_merge<1, 0x1, 256, 1>(ctxtree, &ctxb, t);
ctx_merge<2, 0x3, 128, 2>(ctxtree, &ctxb, t);
ctx_merge<4, 0x7, 64, 3>(ctxtree, &ctxb, t);
ctx_merge<8, 0xf, 32, 4>(ctxtree, &ctxb, t);
__syncthreads();
if (t < 32) {
ctxb = ctxtree[32 + t];
ctx_merge<1, 0x1, 16, 1>(ctxtree, &ctxb, t);
ctx_merge<2, 0x3, 8, 2>(ctxtree, &ctxb, t);
ctx_merge<4, 0x7, 4, 3>(ctxtree, &ctxb, t);
ctx_merge<8, 0xf, 2, 4>(ctxtree, &ctxb, t);
// Final stage
uint64_t tmp = shuffle_xor(ctxb, 16);
if (t == 0) { ctxtree[1] = merge_row_contexts(ctxb, tmp); }
}
}
/*
* @brief 512-wide row context inverse merge transform
*
* Walks the context tree starting from the root node (index 1) using
* the starting context in node index 0.
* The return value is the starting row and input context for the given leaf node
*
* @param[in] ctxtree Merge transform tree
* @param[in] t thread id (leaf node id)
*
* @return Final row context and count (row_position*4 + context_id format)
*/
static inline __device__ rowctx32_t rowctx_inverse_merge_transform(uint64_t ctxtree[1024],
uint32_t t)
{
uint32_t ctx = ctxtree[0] & 3; // Starting input context
rowctx32_t brow4 = 0; // output row in block *4
ctx_unmerge<256>(1, ctxtree, &ctx, &brow4, t);
ctx_unmerge<128>(2 + (t >> 8), ctxtree, &ctx, &brow4, t);
ctx_unmerge<64>(4 + (t >> 7), ctxtree, &ctx, &brow4, t);
ctx_unmerge<32>(8 + (t >> 6), ctxtree, &ctx, &brow4, t);
ctx_unmerge<16>(16 + (t >> 5), ctxtree, &ctx, &brow4, t);
ctx_unmerge<8>(32 + (t >> 4), ctxtree, &ctx, &brow4, t);
ctx_unmerge<4>(64 + (t >> 3), ctxtree, &ctx, &brow4, t);
ctx_unmerge<2>(128 + (t >> 2), ctxtree, &ctx, &brow4, t);
ctx_unmerge<1>(256 + (t >> 1), ctxtree, &ctx, &brow4, t);
return brow4 + ctx;
}
/**
* @brief Gather row offsets from CSV character data split into 16KB chunks
*
* This is done in two phases: the first phase returns the possible row counts
* per 16K character block for each possible parsing context at the start of the block,
* along with the resulting parsing context at the end of the block.
* The caller can then compute the actual parsing context at the beginning of each
* individual block and total row count.
* The second phase outputs the location of each row in the block, using the parsing
* context and initial row counter accumulated from the results of the previous phase.
* Row parsing context will be updated after phase 2 such that the value contains
* the number of rows starting at byte_range_end or beyond.
*
* @param row_ctx Row parsing context (output of phase 1 or input to phase 2)
* @param offsets_out Row offsets (nullptr for phase1, non-null indicates phase 2)
* @param data Base pointer of character data (all row offsets are relative to this)
* @param chunk_size Total number of characters to parse
* @param parse_pos Current parsing position in the file
* @param start_offset Position of the start of the character buffer in the file
* @param data_size CSV file size
* @param byte_range_start Ignore rows starting before this position in the file
* @param byte_range_end In phase 2, store the number of rows beyond range in row_ctx
* @param skip_rows Number of rows to skip (ignored in phase 1)
* @param terminator Line terminator character
* @param delimiter Column delimiter character
* @param quotechar Quote character
* @param escapechar Delimiter escape character
* @param commentchar Comment line character (skip rows starting with this character)
*/
__global__ void __launch_bounds__(rowofs_block_dim)
gather_row_offsets_gpu(uint64_t* row_ctx,
device_span<uint64_t> offsets_out,
device_span<char const> const data,
size_t chunk_size,
size_t parse_pos,
size_t start_offset,
size_t data_size,
size_t byte_range_start,
size_t byte_range_end,
size_t skip_rows,
int terminator,
int delimiter,
int quotechar,
int escapechar,
int commentchar)
{
auto start = data.begin();
using block_reduce = typename hipcub::BlockReduce<uint32_t, rowofs_block_dim>;
__shared__ union {
typename block_reduce::TempStorage bk_storage;
__align__(8) uint64_t ctxtree[rowofs_block_dim * 2];
} temp_storage;
char const* end = start + (min(parse_pos + chunk_size, data_size) - start_offset);
uint32_t t = threadIdx.x;
size_t block_pos =
(parse_pos - start_offset) + blockIdx.x * static_cast<size_t>(rowofs_block_bytes) + t * 32;
char const* cur = start + block_pos;
// Initial state is neutral context (no state transitions), zero rows
uint4 ctx_map = {
.x = 0,
.y = 0,
.z = 0,
.w = (ROW_CTX_NONE << 0) | (ROW_CTX_QUOTE << 2) | (ROW_CTX_COMMENT << 4) | (ROW_CTX_EOF << 6)};
int c, c_prev = (cur > start && cur <= end) ? cur[-1] : terminator;
// Loop through all 32 bytes and keep a bitmask of row starts for each possible input context
for (uint32_t pos = 0; pos < 32; pos++, cur++, c_prev = c) {
uint32_t ctx;
if (cur < end) {
c = cur[0];
if (c_prev == terminator) {
if (c == commentchar) {
// Start of a new comment row
ctx = make_char_context(ROW_CTX_COMMENT, ROW_CTX_QUOTE, ROW_CTX_COMMENT, 1, 0, 1);
} else if (c == quotechar) {
// Quoted string on newrow, or quoted string ending in terminator
ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE, ROW_CTX_QUOTE, 1, 0, 1);
} else {
// Start of a new row unless within a quote
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1);
}
} else if (c == quotechar) {
if (c_prev == delimiter || c_prev == quotechar) {
// Quoted string after delimiter, quoted string ending in delimiter, or double-quote
ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE);
} else {
// Closing or ignored quote
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_NONE);
}
} else {
// Neutral character
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE);
}
} else {
char const* data_end = start + data_size - start_offset;
if (cur <= end && cur == data_end) {
// Add a newline at data end (need the extra row offset to infer length of previous row)
ctx = make_char_context(ROW_CTX_EOF, ROW_CTX_EOF, ROW_CTX_EOF, 1, 1, 1);
} else {
// Pass-through context (beyond chunk_size or data_end)
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_COMMENT);
}
}
// Merge with current context, keeping track of where new rows occur
merge_char_context(ctx_map, ctx, pos);
}
// Eliminate rows that start before byte_range_start
if (start_offset + block_pos < byte_range_start) {
uint32_t dist_minus1 = min(byte_range_start - (start_offset + block_pos) - 1, UINT64_C(31));
uint32_t mask = 0xffff'fffe << dist_minus1;
ctx_map.x &= mask;
ctx_map.y &= mask;
ctx_map.z &= mask;
}
// Convert the long-form {rowmap,outctx}[inctx] version into packed version
// {rowcount,outctx}[inctx], then merge the row contexts of the 32-character blocks into
// a single 16K-character block context
rowctx_merge_transform(temp_storage.ctxtree, pack_rowmaps(ctx_map), t);
// If this is the second phase, get the block's initial parser state and row counter
if (offsets_out.data()) {
if (t == 0) { temp_storage.ctxtree[0] = row_ctx[blockIdx.x]; }
__syncthreads();
// Walk back the transform tree with the known initial parser state
rowctx32_t ctx = rowctx_inverse_merge_transform(temp_storage.ctxtree, t);
uint64_t row = (temp_storage.ctxtree[0] >> 2) + (ctx >> 2);
uint32_t rows_out_of_range = 0;
uint32_t rowmap = select_rowmap(ctx_map, ctx & 3);
// Output row positions
while (rowmap != 0) {
uint32_t pos = __ffs(rowmap);
block_pos += pos;
if (row >= skip_rows && row - skip_rows < offsets_out.size()) {
// Output byte offsets are relative to the base of the input buffer
offsets_out[row - skip_rows] = block_pos - 1;
rows_out_of_range += (start_offset + block_pos - 1 >= byte_range_end);
}
row++;
rowmap >>= pos;
}
__syncthreads();
// Return the number of rows out of range
rows_out_of_range = block_reduce(temp_storage.bk_storage).Sum(rows_out_of_range);
if (t == 0) { row_ctx[blockIdx.x] = rows_out_of_range; }
} else {
// Just store the row counts and output contexts
if (t == 0) { row_ctx[blockIdx.x] = temp_storage.ctxtree[1]; }
}
}
size_t __host__ count_blank_rows(cudf::io::parse_options_view const& opts,
device_span<char const> data,
device_span<uint64_t const> row_offsets,
rmm::cuda_stream_view stream)
{
auto const newline = opts.skipblanklines ? opts.terminator : opts.comment;
auto const comment = opts.comment != '\0' ? opts.comment : newline;
auto const carriage = (opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment;
return thrust::count_if(
rmm::exec_policy(stream),
row_offsets.begin(),
row_offsets.end(),
[data = data, newline, comment, carriage] __device__(uint64_t const pos) {
return ((pos != data.size()) &&
(data[pos] == newline || data[pos] == comment || data[pos] == carriage));
});
}
device_span<uint64_t> __host__ remove_blank_rows(cudf::io::parse_options_view const& options,
device_span<char const> data,
device_span<uint64_t> row_offsets,
rmm::cuda_stream_view stream)
{
size_t d_size = data.size();
auto const newline = options.skipblanklines ? options.terminator : options.comment;
auto const comment = options.comment != '\0' ? options.comment : newline;
auto const carriage = (options.skipblanklines && options.terminator == '\n') ? '\r' : comment;
auto new_end = thrust::remove_if(
rmm::exec_policy(stream),
row_offsets.begin(),
row_offsets.end(),
[data = data, d_size, newline, comment, carriage] __device__(uint64_t const pos) {
return ((pos != d_size) &&
(data[pos] == newline || data[pos] == comment || data[pos] == carriage));
});
return row_offsets.subspan(0, new_end - row_offsets.begin());
}
std::vector<column_type_histogram> detect_column_types(
cudf::io::parse_options_view const& options,
device_span<char const> const data,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_starts,
size_t const num_active_columns,
rmm::cuda_stream_view stream)
{
// Calculate actual block count to use based on records count
int const block_size = csvparse_block_dim;
int const grid_size = (row_starts.size() + block_size - 1) / block_size;
auto d_stats = detail::make_zeroed_device_uvector_async<column_type_histogram>(
num_active_columns, stream, rmm::mr::get_current_device_resource());
hipLaunchKernelGGL(( data_type_detection), dim3(grid_size), dim3(block_size), 0, stream.value(),
options, data, column_flags, row_starts, d_stats);
return detail::make_std_vector_sync(d_stats, stream);
}
void decode_row_column_data(cudf::io::parse_options_view const& options,
device_span<char const> data,
device_span<column_parse::flags const> column_flags,
device_span<uint64_t const> row_offsets,
device_span<cudf::data_type const> dtypes,
device_span<void* const> columns,
device_span<cudf::bitmask_type* const> valids,
device_span<size_type> valid_counts,
rmm::cuda_stream_view stream)
{
// Calculate actual block count to use based on records count
auto const block_size = csvparse_block_dim;
auto const num_rows = row_offsets.size() - 1;
auto const grid_size = (num_rows + block_size - 1) / block_size;
hipLaunchKernelGGL(( convert_csv_to_cudf), dim3(grid_size), dim3(block_size), 0, stream.value(),
options, data, column_flags, row_offsets, dtypes, columns, valids, valid_counts);
}
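// Host-side launcher for the two-phase gather_row_offsets_gpu kernel above: an empty
// offsets_out span runs phase 1 (each block writes its packed row-count/end-context result
// into row_ctx); a non-empty span runs phase 2, where row_ctx is expected to hold each
// block's starting row count and parser context (packed as row*4 + ctx) and the kernel
// writes the actual row offsets. The kernel selects the phase via offsets_out.data().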
uint32_t __host__ gather_row_offsets(parse_options_view const& options,
uint64_t* row_ctx,
device_span<uint64_t> const offsets_out,
device_span<char const> const data,
size_t chunk_size,
size_t parse_pos,
size_t start_offset,
size_t data_size,
size_t byte_range_start,
size_t byte_range_end,
size_t skip_rows,
rmm::cuda_stream_view stream)
{
uint32_t dim_grid = 1 + (chunk_size / rowofs_block_bytes);
hipLaunchKernelGGL(( gather_row_offsets_gpu), dim3(dim_grid), dim3(rowofs_block_dim), 0, stream.value(),
row_ctx,
offsets_out,
data,
chunk_size,
parse_pos,
start_offset,
data_size,
byte_range_start,
byte_range_end,
skip_rows,
options.terminator,
options.delimiter,
(options.quotechar) ? options.quotechar : 0x100,
/*(options.escapechar) ? options.escapechar :*/ 0x100,
(options.comment) ? options.comment : 0x100);
return dim_grid;
}
} // namespace gpu
} // namespace csv
} // namespace io
} // namespace cudf
| d8fda02e1af1aa9ce47df55a70c8c35642311f57.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "csv_common.hpp"
#include "csv_gpu.hpp"
#include <io/utilities/block_utils.cuh>
#include <io/utilities/parsing_utils.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/strings/detail/convert/fixed_point.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <io/utilities/trie.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/count.h>
#include <thrust/detail/copy.h>
#include <thrust/remove.h>
#include <thrust/transform.h>
#include <type_traits>
using namespace ::cudf::io;
using cudf::device_span;
using cudf::detail::grid_1d;
namespace cudf {
namespace io {
namespace csv {
namespace gpu {
/// Block dimension for dtype detection and conversion kernels
constexpr uint32_t csvparse_block_dim = 128;
/*
* @brief Returns true if the input character is a valid digit.
* Supports both decimal and hexadecimal digits (uppercase and lowercase).
*
* @param c Character to check
* @param is_hex Whether to check as a hexadecimal
*
* @return `true` if it is digit-like, `false` otherwise
*/
__device__ __inline__ bool is_digit(char c, bool is_hex = false)
{
if (c >= '0' && c <= '9') return true;
if (is_hex) {
if (c >= 'A' && c <= 'F') return true;
if (c >= 'a' && c <= 'f') return true;
}
return false;
}
/*
* @brief Checks whether the given character counters indicate a potentially
* valid date and/or time field.
*
* For performance and simplicity, we detect only the most common date
* formats. Example formats that are detectable:
*
* `2001/02/30`
* `2001-02-30 00:00:00`
* `2/30/2001 T04:05:60.7`
* `2 / 1 / 2011`
* `02/January`
*
* @param len Number of non special-symbol or numeric characters
* @param decimal_count Number of '.' characters
* @param colon_count Number of ':' characters
* @param dash_count Number of '-' characters
* @param slash_count Number of '/' characters
*
* @return `true` if it is date-like, `false` otherwise
*/
__device__ __inline__ bool is_datetime(
long len, long decimal_count, long colon_count, long dash_count, long slash_count)
{
// Must not exceed count of longest month (September) plus `T` time indicator
if (len > 10) { return false; }
// Must not have more than one decimal point or more than two time separators
if (decimal_count > 1 || colon_count > 2) { return false; }
// Must have one or two '-' or '/' but not both as date separators
if ((dash_count > 0 && dash_count < 3 && slash_count == 0) ||
(dash_count == 0 && slash_count > 0 && slash_count < 3)) {
return true;
}
return false;
}
/*
* @brief Returns true if the counters indicate a potentially valid float.
* False positives are possible because positions are not taken into account.
* For example, field "e.123-" would match the pattern.
*
* @param len Number of non special-symbol or numeric characters
* @param digit_count Number of digits characters
* @param decimal_count Number of occurrences of the decimal point character
* @param thousands_count Number of occurrences of the thousands separator character
* @param dash_count Number of '-' characters
* @param exponent_count Number of 'e or E' characters
*
* @return `true` if it is floating point-like, `false` otherwise
*/
__device__ __inline__ bool is_floatingpoint(long len,
long digit_count,
long decimal_count,
long thousands_count,
long dash_count,
long exponent_count)
{
// Can't have more than one exponent and one decimal point
if (decimal_count > 1) return false;
if (exponent_count > 1) return false;
// Without the exponent or a decimal point, this is an integer, not a float
if (decimal_count == 0 && exponent_count == 0) return false;
// Can only have one '-' per component
if (dash_count > 1 + exponent_count) return false;
// If anything other than these characters is present, it's not a float
if (digit_count + decimal_count + dash_count + exponent_count + thousands_count != len) {
return false;
}
// Needs at least 1 digit, 2 if exponent is present
if (digit_count < 1 + exponent_count) return false;
return true;
}
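// Worked examples of the counting heuristic above:
// "1.5e-3" -> 3 digits, 1 decimal point, 1 dash, 1 exponent, len 6 -> float-like (true)
// "1,234" (with ',' configured as the thousands separator) -> no decimal point or exponent -> integer (false)
// "e.123-" -> the counts add up, so it is accepted even though it is not a valid float (see note above)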
/*
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed one row/record at a time, so the number of total
* threads (tid) is equal to the number of rows.
*
* @param opts A set of parsing options
* @param csv_text The entire CSV data to read
* @param column_flags Per-column parsing behavior flags
* @param row_offsets The start of the CSV data of interest
* @param d_column_data The count for each column data type
*/
__global__ void __launch_bounds__(csvparse_block_dim)
data_type_detection(parse_options_view const opts,
device_span<char const> csv_text,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_offsets,
device_span<column_type_histogram> d_column_data)
{
auto const raw_csv = csv_text.data();
// ThreadIds range per block, so also need the blockId
// This is entry into the fields; threadId is an element within `num_records`
auto const rec_id = grid_1d::global_thread_id();
auto const rec_id_next = rec_id + 1;
// we can have more threads than data, make sure we are not past the end of the data
if (rec_id_next >= row_offsets.size()) { return; }
auto field_start = raw_csv + row_offsets[rec_id];
auto const row_end = raw_csv + row_offsets[rec_id_next];
auto next_field = field_start;
int col = 0;
int actual_col = 0;
// Going through all the columns of a given record
while (col < column_flags.size() && field_start < row_end) {
auto next_delimiter = cudf::io::gpu::seek_field_end(field_start, row_end, opts);
// Checking if this is a column that the user wants --- user can filter columns
if (column_flags[col] & column_parse::inferred) {
// points to last character in the field
auto const field_len = static_cast<size_t>(next_delimiter - field_start);
if (serialized_trie_contains(opts.trie_na, {field_start, field_len})) {
atomicAdd(&d_column_data[actual_col].null_count, 1);
} else if (serialized_trie_contains(opts.trie_true, {field_start, field_len}) ||
serialized_trie_contains(opts.trie_false, {field_start, field_len})) {
atomicAdd(&d_column_data[actual_col].bool_count, 1);
} else if (cudf::io::is_infinity(field_start, next_delimiter)) {
atomicAdd(&d_column_data[actual_col].float_count, 1);
} else {
long count_number = 0;
long count_decimal = 0;
long count_thousands = 0;
long count_slash = 0;
long count_dash = 0;
long count_plus = 0;
long count_colon = 0;
long count_string = 0;
long count_exponent = 0;
// Modify field_start & end to ignore whitespace and quotechars
// This could possibly result in additional empty fields
auto const trimmed_field_range = trim_whitespaces_quotes(field_start, next_delimiter);
auto const trimmed_field_len = trimmed_field_range.second - trimmed_field_range.first;
for (auto cur = trimmed_field_range.first; cur < trimmed_field_range.second; ++cur) {
if (is_digit(*cur)) {
count_number++;
continue;
}
if (*cur == opts.decimal) {
count_decimal++;
continue;
}
if (*cur == opts.thousands) {
count_thousands++;
continue;
}
// Looking for unique characters that will help identify column types.
switch (*cur) {
case '-': count_dash++; break;
case '+': count_plus++; break;
case '/': count_slash++; break;
case ':': count_colon++; break;
case 'e':
case 'E':
if (cur > trimmed_field_range.first && cur < trimmed_field_range.second - 1)
count_exponent++;
break;
default: count_string++; break;
}
}
// Integers have to have the length of the string
// Off by one if they start with a minus sign
auto const int_req_number_cnt =
trimmed_field_len - count_thousands -
((*trimmed_field_range.first == '-' || *trimmed_field_range.first == '+') &&
trimmed_field_len > 1);
if (column_flags[col] & column_parse::as_datetime) {
// PANDAS uses `object` dtype if the date is unparseable
if (is_datetime(count_string, count_decimal, count_colon, count_dash, count_slash)) {
atomicAdd(&d_column_data[actual_col].datetime_count, 1);
} else {
atomicAdd(&d_column_data[actual_col].string_count, 1);
}
} else if (count_number == int_req_number_cnt) {
auto const is_negative = (*trimmed_field_range.first == '-');
auto const data_begin =
trimmed_field_range.first + (is_negative || (*trimmed_field_range.first == '+'));
cudf::size_type* ptr = cudf::io::gpu::infer_integral_field_counter(
data_begin, data_begin + count_number, is_negative, d_column_data[actual_col]);
atomicAdd(ptr, 1);
} else if (is_floatingpoint(trimmed_field_len,
count_number,
count_decimal,
count_thousands,
count_dash + count_plus,
count_exponent)) {
atomicAdd(&d_column_data[actual_col].float_count, 1);
} else {
atomicAdd(&d_column_data[actual_col].string_count, 1);
}
}
actual_col++;
}
next_field = next_delimiter + 1;
field_start = next_field;
col++;
}
}
/**
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] options A set of parsing options
* @param[in] data The entire CSV data to read
* @param[in] column_flags Per-column parsing behavior flags
* @param[in] row_offsets The start of the CSV data of interest
* @param[in] dtypes The data type of the column
* @param[out] columns The output column data
* @param[out] valids The bitmaps indicating whether column fields are valid
* @param[out] valid_counts The number of valid fields in each column
*/
__global__ void __launch_bounds__(csvparse_block_dim)
convert_csv_to_cudf(cudf::io::parse_options_view options,
device_span<char const> data,
device_span<column_parse::flags const> column_flags,
device_span<uint64_t const> row_offsets,
device_span<cudf::data_type const> dtypes,
device_span<void* const> columns,
device_span<cudf::bitmask_type* const> valids,
device_span<size_type> valid_counts)
{
auto const raw_csv = data.data();
// thread IDs range per block, so also need the block id.
// this is entry into the field array - tid is an element within the num_entries array
auto const rec_id = grid_1d::global_thread_id();
auto const rec_id_next = rec_id + 1;
// we can have more threads than data, make sure we are not past the end of the data
if (rec_id_next >= row_offsets.size()) return;
auto field_start = raw_csv + row_offsets[rec_id];
auto const row_end = raw_csv + row_offsets[rec_id_next];
auto next_field = field_start;
int col = 0;
int actual_col = 0;
while (col < column_flags.size() && field_start < row_end) {
auto next_delimiter = cudf::io::gpu::seek_field_end(next_field, row_end, options);
if (column_flags[col] & column_parse::enabled) {
// check if the entire field is a NaN string - consistent with pandas
auto const is_valid = !serialized_trie_contains(
options.trie_na, {field_start, static_cast<size_t>(next_delimiter - field_start)});
// Modify field_start & end to ignore whitespace and quotechars
auto field_end = next_delimiter;
if (is_valid && dtypes[actual_col].id() != cudf::type_id::STRING) {
auto const trimmed_field =
trim_whitespaces_quotes(field_start, field_end, options.quotechar);
field_start = trimmed_field.first;
field_end = trimmed_field.second;
}
if (is_valid) {
// Type dispatcher does not handle STRING
if (dtypes[actual_col].id() == cudf::type_id::STRING) {
auto end = next_delimiter;
if (not options.keepquotes) {
if ((*field_start == options.quotechar) && (*(end - 1) == options.quotechar)) {
++field_start;
--end;
}
}
auto str_list = static_cast<std::pair<char const*, size_t>*>(columns[actual_col]);
str_list[rec_id].first = field_start;
str_list[rec_id].second = end - field_start;
} else {
if (cudf::type_dispatcher(dtypes[actual_col],
ConvertFunctor{},
field_start,
field_end,
columns[actual_col],
rec_id,
dtypes[actual_col],
options,
column_flags[col] & column_parse::as_hexadecimal)) {
// set the valid bitmap - all bits were set to 0 to start
set_bit(valids[actual_col], rec_id);
atomicAdd(&valid_counts[actual_col], 1);
}
}
} else if (dtypes[actual_col].id() == cudf::type_id::STRING) {
auto str_list = static_cast<std::pair<char const*, size_t>*>(columns[actual_col]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
++actual_col;
}
next_field = next_delimiter + 1;
field_start = next_field;
++col;
}
}
/*
* @brief Merge two packed row contexts (each corresponding to a block of characters)
* and return the packed row context corresponding to the merged character block
*/
inline __device__ packed_rowctx_t merge_row_contexts(packed_rowctx_t first_ctx,
packed_rowctx_t second_ctx)
{
uint32_t id0 = get_row_context(first_ctx, ROW_CTX_NONE) & 3;
uint32_t id1 = get_row_context(first_ctx, ROW_CTX_QUOTE) & 3;
uint32_t id2 = get_row_context(first_ctx, ROW_CTX_COMMENT) & 3;
return (first_ctx & ~pack_row_contexts(3, 3, 3)) +
pack_row_contexts(get_row_context(second_ctx, id0),
get_row_context(second_ctx, id1),
get_row_context(second_ctx, id2));
}
/*
* @brief Per-character context:
* 1-bit count (0 or 1) per context in the lower 4 bits
* 2-bit output context id per input context in bits 8..15
*/
constexpr __device__ uint32_t make_char_context(uint32_t id0,
uint32_t id1,
uint32_t id2 = ROW_CTX_COMMENT,
uint32_t c0 = 0,
uint32_t c1 = 0,
uint32_t c2 = 0)
{
return (id0 << 8) | (id1 << 10) | (id2 << 12) | (ROW_CTX_EOF << 14) | (c0) | (c1 << 1) |
(c2 << 2);
}
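// Example: the "previous character was the line terminator, current character is ordinary"
// case below uses make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1):
// the low bits mark a new row when the block entered this character in the NONE or COMMENT
// context (but not inside a quoted field), and bits 8..13 hold the 2-bit context the parser
// moves to for each of the three possible input contexts.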
/*
* @brief Merge a 1-character context to keep track of bitmasks where new rows occur
* Merges a single-character "block" row context at position pos with the current
* block's row context (the current block contains 32-pos characters)
*
* @param ctx Current block context and new rows bitmaps
* @param char_ctx state transitions associated with new character
* @param pos Position within the current 32-character block
*
* NOTE: This is probably the most performance-critical piece of the row gathering kernel.
* The char_ctx value should be created via make_char_context, and its value should
* have been evaluated at compile-time.
*/
inline __device__ void merge_char_context(uint4& ctx, uint32_t char_ctx, uint32_t pos)
{
uint32_t id0 = (ctx.w >> 0) & 3;
uint32_t id1 = (ctx.w >> 2) & 3;
uint32_t id2 = (ctx.w >> 4) & 3;
// Set the newrow bit in the bitmap at the corresponding position
ctx.x |= ((char_ctx >> id0) & 1) << pos;
ctx.y |= ((char_ctx >> id1) & 1) << pos;
ctx.z |= ((char_ctx >> id2) & 1) << pos;
// Update the output context ids
ctx.w = ((char_ctx >> (8 + id0 * 2)) & 0x03) | ((char_ctx >> (6 + id1 * 2)) & 0x0c) |
((char_ctx >> (4 + id2 * 2)) & 0x30) | (ROW_CTX_EOF << 6);
}
/*
* Convert the context-with-row-bitmaps version to a packed row context
*/
inline __device__ packed_rowctx_t pack_rowmaps(uint4 ctx_map)
{
return pack_row_contexts(make_row_context(__popc(ctx_map.x), (ctx_map.w >> 0) & 3),
make_row_context(__popc(ctx_map.y), (ctx_map.w >> 2) & 3),
make_row_context(__popc(ctx_map.z), (ctx_map.w >> 4) & 3));
}
/*
* Selects the row bitmap corresponding to the given parser state
*/
inline __device__ uint32_t select_rowmap(uint4 ctx_map, uint32_t ctxid)
{
return (ctxid == ROW_CTX_NONE) ? ctx_map.x
: (ctxid == ROW_CTX_QUOTE) ? ctx_map.y
: (ctxid == ROW_CTX_COMMENT) ? ctx_map.z
: 0;
}
/**
* @brief Single pair-wise 512-wide row context merge transform
*
* Merge row context blocks and record the merge operation in a context
* tree so that the transform is reversible.
* The tree is organized such that the left and right children of node n
* are located at indices n*2 and n*2+1, the root node starting at index 1
*
* @tparam lanemask mask to specify source of packed row context
* @tparam tmask mask to specify principal thread for merging row context
* @tparam base start location for writing into packed row context tree
* @tparam level_scale level of the node in the tree
* @param[out] ctxtree packed row context tree
* @param[in] ctxb packed row context for the current character block
* @param t thread id (leaf node id)
*/
template <uint32_t lanemask, uint32_t tmask, uint32_t base, uint32_t level_scale>
inline __device__ void ctx_merge(uint64_t* ctxtree, packed_rowctx_t* ctxb, uint32_t t)
{
uint64_t tmp = shuffle_xor(*ctxb, lanemask);
if (!(t & tmask)) {
*ctxb = merge_row_contexts(*ctxb, tmp);
ctxtree[base + (t >> level_scale)] = *ctxb;
}
}
/**
* @brief Single 512-wide row context inverse merge transform
*
* Walks the context tree starting from a root node
*
* @tparam rmask Mask to specify which threads write input row context
* @param[in] base Start read location of the merge transform tree
* @param[in] ctxtree Merge transform tree
* @param[in] ctx Input context
* @param[in] brow4 output row in block *4
* @param[in] t thread id (leaf node id)
*/
template <uint32_t rmask>
inline __device__ void ctx_unmerge(
uint32_t base, uint64_t* ctxtree, uint32_t* ctx, uint32_t* brow4, uint32_t t)
{
rowctx32_t ctxb_left, ctxb_right, ctxb_sum;
ctxb_sum = get_row_context(ctxtree[base], *ctx);
ctxb_left = get_row_context(ctxtree[(base)*2 + 0], *ctx);
ctxb_right = get_row_context(ctxtree[(base)*2 + 1], ctxb_left & 3);
if (t & (rmask)) {
*brow4 += (ctxb_sum & ~3) - (ctxb_right & ~3);
*ctx = ctxb_left & 3;
}
}
/*
* @brief 512-wide row context merge transform
*
* Repeatedly merge row context blocks, keeping track of each merge operation
* in a context tree so that the transform is reversible
* The tree is organized such that the left and right children of node n
* are located at indices n*2 and n*2+1, the root node starting at index 1
*
* Each node contains the counts and output contexts corresponding to the
* possible input contexts.
* Each parent node's count is obtained by adding the corresponding counts
* from the left child node with the right child node's count selected from
* the left child node's output context:
* parent.count[k] = left.count[k] + right.count[left.outctx[k]]
* parent.outctx[k] = right.outctx[left.outctx[k]]
*
* @param[out] ctxtree packed row context tree
* @param[in] ctxb packed row context for the current character block
* @param t thread id (leaf node id)
*/
static inline __device__ void rowctx_merge_transform(uint64_t ctxtree[1024],
packed_rowctx_t ctxb,
uint32_t t)
{
ctxtree[512 + t] = ctxb;
ctx_merge<1, 0x1, 256, 1>(ctxtree, &ctxb, t);
ctx_merge<2, 0x3, 128, 2>(ctxtree, &ctxb, t);
ctx_merge<4, 0x7, 64, 3>(ctxtree, &ctxb, t);
ctx_merge<8, 0xf, 32, 4>(ctxtree, &ctxb, t);
__syncthreads();
if (t < 32) {
ctxb = ctxtree[32 + t];
ctx_merge<1, 0x1, 16, 1>(ctxtree, &ctxb, t);
ctx_merge<2, 0x3, 8, 2>(ctxtree, &ctxb, t);
ctx_merge<4, 0x7, 4, 3>(ctxtree, &ctxb, t);
ctx_merge<8, 0xf, 2, 4>(ctxtree, &ctxb, t);
// Final stage
uint64_t tmp = shuffle_xor(ctxb, 16);
if (t == 0) { ctxtree[1] = merge_row_contexts(ctxb, tmp); }
}
}
/*
* @brief 512-wide row context inverse merge transform
*
* Walks the context tree starting from the root node (index 1) using
* the starting context in node index 0.
* The return value is the starting row and input context for the given leaf node
*
* @param[in] ctxtree Merge transform tree
* @param[in] t thread id (leaf node id)
*
* @return Final row context and count (row_position*4 + context_id format)
*/
static inline __device__ rowctx32_t rowctx_inverse_merge_transform(uint64_t ctxtree[1024],
uint32_t t)
{
uint32_t ctx = ctxtree[0] & 3; // Starting input context
rowctx32_t brow4 = 0; // output row in block *4
ctx_unmerge<256>(1, ctxtree, &ctx, &brow4, t);
ctx_unmerge<128>(2 + (t >> 8), ctxtree, &ctx, &brow4, t);
ctx_unmerge<64>(4 + (t >> 7), ctxtree, &ctx, &brow4, t);
ctx_unmerge<32>(8 + (t >> 6), ctxtree, &ctx, &brow4, t);
ctx_unmerge<16>(16 + (t >> 5), ctxtree, &ctx, &brow4, t);
ctx_unmerge<8>(32 + (t >> 4), ctxtree, &ctx, &brow4, t);
ctx_unmerge<4>(64 + (t >> 3), ctxtree, &ctx, &brow4, t);
ctx_unmerge<2>(128 + (t >> 2), ctxtree, &ctx, &brow4, t);
ctx_unmerge<1>(256 + (t >> 1), ctxtree, &ctx, &brow4, t);
return brow4 + ctx;
}
/**
* @brief Gather row offsets from CSV character data split into 16KB chunks
*
* This is done in two phases: the first phase returns the possible row counts
* per 16K character block for each possible parsing context at the start of the block,
* along with the resulting parsing context at the end of the block.
* The caller can then compute the actual parsing context at the beginning of each
* individual block and total row count.
* The second phase outputs the location of each row in the block, using the parsing
* context and initial row counter accumulated from the results of the previous phase.
* Row parsing context will be updated after phase 2 such that the value contains
* the number of rows starting at byte_range_end or beyond.
*
* @param row_ctx Row parsing context (output of phase 1 or input to phase 2)
* @param offsets_out Row offsets (nullptr for phase1, non-null indicates phase 2)
* @param data Base pointer of character data (all row offsets are relative to this)
* @param chunk_size Total number of characters to parse
* @param parse_pos Current parsing position in the file
* @param start_offset Position of the start of the character buffer in the file
* @param data_size CSV file size
* @param byte_range_start Ignore rows starting before this position in the file
* @param byte_range_end In phase 2, store the number of rows beyond range in row_ctx
* @param skip_rows Number of rows to skip (ignored in phase 1)
* @param terminator Line terminator character
* @param delimiter Column delimiter character
* @param quotechar Quote character
* @param escapechar Delimiter escape character
* @param commentchar Comment line character (skip rows starting with this character)
*/
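// Rough host-side sequencing (a sketch, not the full reader logic): the kernel is first
// launched with an empty offsets_out, so each block stores its packed {row count,
// output context} tree root in row_ctx[blockIdx.x]; the host combines those per-block
// results into each block's starting row number and parsing context and writes them
// back into row_ctx; a second launch with a non-null offsets_out then reads that start
// state (see temp_storage.ctxtree[0] below) and emits absolute row offsets.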
__global__ void __launch_bounds__(rowofs_block_dim)
gather_row_offsets_gpu(uint64_t* row_ctx,
device_span<uint64_t> offsets_out,
device_span<char const> const data,
size_t chunk_size,
size_t parse_pos,
size_t start_offset,
size_t data_size,
size_t byte_range_start,
size_t byte_range_end,
size_t skip_rows,
int terminator,
int delimiter,
int quotechar,
int escapechar,
int commentchar)
{
auto start = data.begin();
using block_reduce = typename cub::BlockReduce<uint32_t, rowofs_block_dim>;
__shared__ union {
typename block_reduce::TempStorage bk_storage;
__align__(8) uint64_t ctxtree[rowofs_block_dim * 2];
} temp_storage;
char const* end = start + (min(parse_pos + chunk_size, data_size) - start_offset);
uint32_t t = threadIdx.x;
size_t block_pos =
(parse_pos - start_offset) + blockIdx.x * static_cast<size_t>(rowofs_block_bytes) + t * 32;
char const* cur = start + block_pos;
// Initial state is neutral context (no state transitions), zero rows
uint4 ctx_map = {
.x = 0,
.y = 0,
.z = 0,
.w = (ROW_CTX_NONE << 0) | (ROW_CTX_QUOTE << 2) | (ROW_CTX_COMMENT << 4) | (ROW_CTX_EOF << 6)};
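  // (ctx_map keeps one 32-bit row-start bitmap per possible starting context of this
  // 32-byte block in .x/.y/.z, and .w packs the 2-bit context reached at the end of
  // the block for each of those starting contexts)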
int c, c_prev = (cur > start && cur <= end) ? cur[-1] : terminator;
// Loop through all 32 bytes and keep a bitmask of row starts for each possible input context
for (uint32_t pos = 0; pos < 32; pos++, cur++, c_prev = c) {
uint32_t ctx;
if (cur < end) {
c = cur[0];
if (c_prev == terminator) {
if (c == commentchar) {
// Start of a new comment row
ctx = make_char_context(ROW_CTX_COMMENT, ROW_CTX_QUOTE, ROW_CTX_COMMENT, 1, 0, 1);
} else if (c == quotechar) {
// Quoted string on newrow, or quoted string ending in terminator
ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE, ROW_CTX_QUOTE, 1, 0, 1);
} else {
// Start of a new row unless within a quote
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1);
}
} else if (c == quotechar) {
if (c_prev == delimiter || c_prev == quotechar) {
// Quoted string after delimiter, quoted string ending in delimiter, or double-quote
ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE);
} else {
// Closing or ignored quote
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_NONE);
}
} else {
// Neutral character
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE);
}
} else {
char const* data_end = start + data_size - start_offset;
if (cur <= end && cur == data_end) {
// Add a newline at data end (need the extra row offset to infer length of previous row)
ctx = make_char_context(ROW_CTX_EOF, ROW_CTX_EOF, ROW_CTX_EOF, 1, 1, 1);
} else {
// Pass-through context (beyond chunk_size or data_end)
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_COMMENT);
}
}
// Merge with current context, keeping track of where new rows occur
merge_char_context(ctx_map, ctx, pos);
}
// Eliminate rows that start before byte_range_start
if (start_offset + block_pos < byte_range_start) {
uint32_t dist_minus1 = min(byte_range_start - (start_offset + block_pos) - 1, UINT64_C(31));
uint32_t mask = 0xffff'fffe << dist_minus1;
ctx_map.x &= mask;
ctx_map.y &= mask;
ctx_map.z &= mask;
}
// Convert the long-form {rowmap,outctx}[inctx] version into packed version
  // {rowcount,outctx}[inctx], then merge the row contexts of the 32-character blocks into
// a single 16K-character block context
rowctx_merge_transform(temp_storage.ctxtree, pack_rowmaps(ctx_map), t);
// If this is the second phase, get the block's initial parser state and row counter
if (offsets_out.data()) {
if (t == 0) { temp_storage.ctxtree[0] = row_ctx[blockIdx.x]; }
__syncthreads();
// Walk back the transform tree with the known initial parser state
rowctx32_t ctx = rowctx_inverse_merge_transform(temp_storage.ctxtree, t);
uint64_t row = (temp_storage.ctxtree[0] >> 2) + (ctx >> 2);
uint32_t rows_out_of_range = 0;
uint32_t rowmap = select_rowmap(ctx_map, ctx & 3);
// Output row positions
while (rowmap != 0) {
uint32_t pos = __ffs(rowmap);
block_pos += pos;
if (row >= skip_rows && row - skip_rows < offsets_out.size()) {
// Output byte offsets are relative to the base of the input buffer
offsets_out[row - skip_rows] = block_pos - 1;
rows_out_of_range += (start_offset + block_pos - 1 >= byte_range_end);
}
row++;
rowmap >>= pos;
}
__syncthreads();
// Return the number of rows out of range
rows_out_of_range = block_reduce(temp_storage.bk_storage).Sum(rows_out_of_range);
if (t == 0) { row_ctx[blockIdx.x] = rows_out_of_range; }
} else {
// Just store the row counts and output contexts
if (t == 0) { row_ctx[blockIdx.x] = temp_storage.ctxtree[1]; }
}
}
size_t __host__ count_blank_rows(cudf::io::parse_options_view const& opts,
device_span<char const> data,
device_span<uint64_t const> row_offsets,
rmm::cuda_stream_view stream)
{
auto const newline = opts.skipblanklines ? opts.terminator : opts.comment;
auto const comment = opts.comment != '\0' ? opts.comment : newline;
auto const carriage = (opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment;
return thrust::count_if(
rmm::exec_policy(stream),
row_offsets.begin(),
row_offsets.end(),
[data = data, newline, comment, carriage] __device__(uint64_t const pos) {
return ((pos != data.size()) &&
(data[pos] == newline || data[pos] == comment || data[pos] == carriage));
});
}
device_span<uint64_t> __host__ remove_blank_rows(cudf::io::parse_options_view const& options,
device_span<char const> data,
device_span<uint64_t> row_offsets,
rmm::cuda_stream_view stream)
{
size_t d_size = data.size();
auto const newline = options.skipblanklines ? options.terminator : options.comment;
auto const comment = options.comment != '\0' ? options.comment : newline;
auto const carriage = (options.skipblanklines && options.terminator == '\n') ? '\r' : comment;
auto new_end = thrust::remove_if(
rmm::exec_policy(stream),
row_offsets.begin(),
row_offsets.end(),
[data = data, d_size, newline, comment, carriage] __device__(uint64_t const pos) {
return ((pos != d_size) &&
(data[pos] == newline || data[pos] == comment || data[pos] == carriage));
});
return row_offsets.subspan(0, new_end - row_offsets.begin());
}
std::vector<column_type_histogram> detect_column_types(
cudf::io::parse_options_view const& options,
device_span<char const> const data,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_starts,
size_t const num_active_columns,
rmm::cuda_stream_view stream)
{
// Calculate actual block count to use based on records count
int const block_size = csvparse_block_dim;
int const grid_size = (row_starts.size() + block_size - 1) / block_size;
auto d_stats = detail::make_zeroed_device_uvector_async<column_type_histogram>(
num_active_columns, stream, rmm::mr::get_current_device_resource());
data_type_detection<<<grid_size, block_size, 0, stream.value()>>>(
options, data, column_flags, row_starts, d_stats);
return detail::make_std_vector_sync(d_stats, stream);
}
void decode_row_column_data(cudf::io::parse_options_view const& options,
device_span<char const> data,
device_span<column_parse::flags const> column_flags,
device_span<uint64_t const> row_offsets,
device_span<cudf::data_type const> dtypes,
device_span<void* const> columns,
device_span<cudf::bitmask_type* const> valids,
device_span<size_type> valid_counts,
rmm::cuda_stream_view stream)
{
// Calculate actual block count to use based on records count
auto const block_size = csvparse_block_dim;
auto const num_rows = row_offsets.size() - 1;
auto const grid_size = (num_rows + block_size - 1) / block_size;
convert_csv_to_cudf<<<grid_size, block_size, 0, stream.value()>>>(
options, data, column_flags, row_offsets, dtypes, columns, valids, valid_counts);
}
uint32_t __host__ gather_row_offsets(parse_options_view const& options,
uint64_t* row_ctx,
device_span<uint64_t> const offsets_out,
device_span<char const> const data,
size_t chunk_size,
size_t parse_pos,
size_t start_offset,
size_t data_size,
size_t byte_range_start,
size_t byte_range_end,
size_t skip_rows,
rmm::cuda_stream_view stream)
{
uint32_t dim_grid = 1 + (chunk_size / rowofs_block_bytes);
gather_row_offsets_gpu<<<dim_grid, rowofs_block_dim, 0, stream.value()>>>(
row_ctx,
offsets_out,
data,
chunk_size,
parse_pos,
start_offset,
data_size,
byte_range_start,
byte_range_end,
skip_rows,
options.terminator,
options.delimiter,
(options.quotechar) ? options.quotechar : 0x100,
/*(options.escapechar) ? options.escapechar :*/ 0x100,
(options.comment) ? options.comment : 0x100);
return dim_grid;
}
} // namespace gpu
} // namespace csv
} // namespace io
} // namespace cudf
|
cd59a399071b2a812138bf9ce049e91136a3a356.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/spmm.cu
* \brief SpGEAM C APIs and definitions.
*/
#include <dgl/array.h>
#include <dgl/runtime/device_api.h>
#include "functor.cuh"
#include "./cusparse_dispatcher.cuh"
#include "../../runtime/cuda/cuda_common.h"
namespace dgl {
using namespace dgl::runtime;
namespace aten {
namespace cusparse {
/*! Cusparse implementation of SpSum on Csr format. */
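// The call sequence below follows the usual csrgeam2 three-step pattern, wrapped here
// by the CSRGEAM<DType> helpers from cusparse_dispatcher.cuh: query the required
// workspace size, compute the output row offsets and nnz, then fill the output
// column indices and values.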
template <typename DType, typename IdType>
std::pair<CSRMatrix, NDArray> CusparseCsrgeam2(
const CSRMatrix& A,
const NDArray A_weights_array,
const CSRMatrix& B,
const NDArray B_weights_array) {
const int m = A.num_rows;
const int n = A.num_cols;
const int nnzA = A.indices->shape[0];
const int nnzB = B.indices->shape[0];
int nnzC;
const DType alpha = 1.0;
const DType beta = 1.0;
auto ctx = A.indptr->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
const DType* A_weights = A_weights_array.Ptr<DType>();
const DType* B_weights = B_weights_array.Ptr<DType>();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle)
CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle)));
CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
hipsparseMatDescr_t matA, matB, matC;
CUSPARSE_CALL(hipsparseCreateMatDescr(&matA));
CUSPARSE_CALL(hipsparseCreateMatDescr(&matB));
CUSPARSE_CALL(hipsparseCreateMatDescr(&matC));
hipsparseSetPointerMode(thr_entry->cusparse_handle, HIPSPARSE_POINTER_MODE_HOST);
size_t workspace_size = 0;
/* prepare output C */
IdArray dC_csrOffsets = IdArray::Empty({A.num_rows+1}, A.indptr->dtype, ctx);
IdType* dC_csrOffsets_data = dC_csrOffsets.Ptr<IdType>();
IdArray dC_columns;
NDArray dC_weights;
IdType* dC_columns_data = dC_columns.Ptr<IdType>();
DType* dC_weights_data = dC_weights.Ptr<DType>();
/* prepare buffer */
CUSPARSE_CALL(CSRGEAM<DType>::bufferSizeExt(
thr_entry->cusparse_handle, m, n, &alpha,
matA, nnzA, A_weights,
A.indptr.Ptr<IdType>(),
A.indices.Ptr<IdType>(),
&beta, matB, nnzB, B_weights,
B.indptr.Ptr<IdType>(),
B.indices.Ptr<IdType>(),
matC, dC_weights_data, dC_csrOffsets_data, dC_columns_data,
&workspace_size));
void *workspace = device->AllocWorkspace(ctx, workspace_size);
CUSPARSE_CALL(CSRGEAM<DType>::nnz(thr_entry->cusparse_handle,
m, n, matA, nnzA,
A.indptr.Ptr<IdType>(),
A.indices.Ptr<IdType>(),
matB, nnzB,
B.indptr.Ptr<IdType>(),
B.indices.Ptr<IdType>(),
matC, dC_csrOffsets_data, &nnzC, workspace));
dC_columns = IdArray::Empty({nnzC}, A.indptr->dtype, ctx);
dC_weights = NDArray::Empty({nnzC}, A_weights_array->dtype, ctx);
dC_columns_data = dC_columns.Ptr<IdType>();
dC_weights_data = dC_weights.Ptr<DType>();
CUSPARSE_CALL(CSRGEAM<DType>::compute(
thr_entry->cusparse_handle, m, n, &alpha,
matA, nnzA, A_weights,
A.indptr.Ptr<IdType>(),
A.indices.Ptr<IdType>(),
&beta, matB, nnzB, B_weights,
B.indptr.Ptr<IdType>(),
B.indices.Ptr<IdType>(),
matC, dC_weights_data, dC_csrOffsets_data, dC_columns_data,
workspace));
device->FreeWorkspace(ctx, workspace);
// destroy matrix/vector descriptors
CUSPARSE_CALL(hipsparseDestroyMatDescr(matA));
CUSPARSE_CALL(hipsparseDestroyMatDescr(matB));
CUSPARSE_CALL(hipsparseDestroyMatDescr(matC));
return {CSRMatrix(A.num_rows, A.num_cols, dC_csrOffsets, dC_columns),
dC_weights};
}
} // namespace cusparse
template <int XPU, typename IdType, typename DType>
std::pair<CSRMatrix, NDArray> CSRSum(
const std::vector<CSRMatrix>& As,
const std::vector<NDArray>& A_weights) {
const int64_t M = As[0].num_rows;
const int64_t N = As[0].num_cols;
const int64_t n = As.size();
// Cast 64 bit indices to 32 bit
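  // (the cusparse csrgeam2 path invoked below, CusparseCsrgeam2<DType, int32_t>,
  // operates on 32-bit CSR indices)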
std::vector<CSRMatrix> newAs;
bool cast = false;
if (As[0].indptr->dtype.bits == 64) {
newAs.reserve(n);
for (int i = 0; i < n; ++i)
newAs.emplace_back(
As[i].num_rows, As[i].num_cols, AsNumBits(As[i].indptr, 32),
AsNumBits(As[i].indices, 32), AsNumBits(As[i].data, 32));
cast = true;
}
const std::vector<CSRMatrix> &As_ref = cast ? newAs : As;
// Reorder weights if A[i] has edge IDs
std::vector<NDArray> A_weights_reordered(n);
for (int i = 0; i < n; ++i) {
if (CSRHasData(As[i]))
A_weights_reordered[i] = IndexSelect(A_weights[i], As[i].data);
else
A_weights_reordered[i] = A_weights[i];
}
// Loop and sum
auto result = std::make_pair(
CSRMatrix(
As_ref[0].num_rows, As_ref[0].num_cols,
As_ref[0].indptr, As_ref[0].indices),
A_weights_reordered[0]); // Weights already reordered so we don't need As[0].data
for (int64_t i = 1; i < n; ++i)
result = cusparse::CusparseCsrgeam2<DType, int32_t>(
result.first, result.second, As_ref[i], A_weights_reordered[i]);
// Cast 32 bit indices back to 64 bit if necessary
if (cast) {
CSRMatrix C = result.first;
return {
CSRMatrix(C.num_rows, C.num_cols, AsNumBits(C.indptr, 64), AsNumBits(C.indices, 64)),
result.second};
} else {
return result;
}
}
template std::pair<CSRMatrix, NDArray> CSRSum<kDLGPU, int32_t, float>(
const std::vector<CSRMatrix>&, const std::vector<NDArray>&);
template std::pair<CSRMatrix, NDArray> CSRSum<kDLGPU, int64_t, float>(
const std::vector<CSRMatrix>&, const std::vector<NDArray>&);
template std::pair<CSRMatrix, NDArray> CSRSum<kDLGPU, int32_t, double>(
const std::vector<CSRMatrix>&, const std::vector<NDArray>&);
template std::pair<CSRMatrix, NDArray> CSRSum<kDLGPU, int64_t, double>(
const std::vector<CSRMatrix>&, const std::vector<NDArray>&);
} // namespace aten
} // namespace dgl
| cd59a399071b2a812138bf9ce049e91136a3a356.cu | /*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/spmm.cu
* \brief SpGEAM C APIs and definitions.
*/
#include <dgl/array.h>
#include <dgl/runtime/device_api.h>
#include "./functor.cuh"
#include "./cusparse_dispatcher.cuh"
#include "../../runtime/cuda/cuda_common.h"
namespace dgl {
using namespace dgl::runtime;
namespace aten {
namespace cusparse {
/*! Cusparse implementation of SpSum on Csr format. */
template <typename DType, typename IdType>
std::pair<CSRMatrix, NDArray> CusparseCsrgeam2(
const CSRMatrix& A,
const NDArray A_weights_array,
const CSRMatrix& B,
const NDArray B_weights_array) {
const int m = A.num_rows;
const int n = A.num_cols;
const int nnzA = A.indices->shape[0];
const int nnzB = B.indices->shape[0];
int nnzC;
const DType alpha = 1.0;
const DType beta = 1.0;
auto ctx = A.indptr->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
const DType* A_weights = A_weights_array.Ptr<DType>();
const DType* B_weights = B_weights_array.Ptr<DType>();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle)
CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
cusparseMatDescr_t matA, matB, matC;
CUSPARSE_CALL(cusparseCreateMatDescr(&matA));
CUSPARSE_CALL(cusparseCreateMatDescr(&matB));
CUSPARSE_CALL(cusparseCreateMatDescr(&matC));
cusparseSetPointerMode(thr_entry->cusparse_handle, CUSPARSE_POINTER_MODE_HOST);
size_t workspace_size = 0;
/* prepare output C */
IdArray dC_csrOffsets = IdArray::Empty({A.num_rows+1}, A.indptr->dtype, ctx);
IdType* dC_csrOffsets_data = dC_csrOffsets.Ptr<IdType>();
IdArray dC_columns;
NDArray dC_weights;
IdType* dC_columns_data = dC_columns.Ptr<IdType>();
DType* dC_weights_data = dC_weights.Ptr<DType>();
/* prepare buffer */
CUSPARSE_CALL(CSRGEAM<DType>::bufferSizeExt(
thr_entry->cusparse_handle, m, n, &alpha,
matA, nnzA, A_weights,
A.indptr.Ptr<IdType>(),
A.indices.Ptr<IdType>(),
&beta, matB, nnzB, B_weights,
B.indptr.Ptr<IdType>(),
B.indices.Ptr<IdType>(),
matC, dC_weights_data, dC_csrOffsets_data, dC_columns_data,
&workspace_size));
void *workspace = device->AllocWorkspace(ctx, workspace_size);
CUSPARSE_CALL(CSRGEAM<DType>::nnz(thr_entry->cusparse_handle,
m, n, matA, nnzA,
A.indptr.Ptr<IdType>(),
A.indices.Ptr<IdType>(),
matB, nnzB,
B.indptr.Ptr<IdType>(),
B.indices.Ptr<IdType>(),
matC, dC_csrOffsets_data, &nnzC, workspace));
dC_columns = IdArray::Empty({nnzC}, A.indptr->dtype, ctx);
dC_weights = NDArray::Empty({nnzC}, A_weights_array->dtype, ctx);
dC_columns_data = dC_columns.Ptr<IdType>();
dC_weights_data = dC_weights.Ptr<DType>();
CUSPARSE_CALL(CSRGEAM<DType>::compute(
thr_entry->cusparse_handle, m, n, &alpha,
matA, nnzA, A_weights,
A.indptr.Ptr<IdType>(),
A.indices.Ptr<IdType>(),
&beta, matB, nnzB, B_weights,
B.indptr.Ptr<IdType>(),
B.indices.Ptr<IdType>(),
matC, dC_weights_data, dC_csrOffsets_data, dC_columns_data,
workspace));
device->FreeWorkspace(ctx, workspace);
// destroy matrix/vector descriptors
CUSPARSE_CALL(cusparseDestroyMatDescr(matA));
CUSPARSE_CALL(cusparseDestroyMatDescr(matB));
CUSPARSE_CALL(cusparseDestroyMatDescr(matC));
return {CSRMatrix(A.num_rows, A.num_cols, dC_csrOffsets, dC_columns),
dC_weights};
}
} // namespace cusparse
template <int XPU, typename IdType, typename DType>
std::pair<CSRMatrix, NDArray> CSRSum(
const std::vector<CSRMatrix>& As,
const std::vector<NDArray>& A_weights) {
const int64_t M = As[0].num_rows;
const int64_t N = As[0].num_cols;
const int64_t n = As.size();
// Cast 64 bit indices to 32 bit
std::vector<CSRMatrix> newAs;
bool cast = false;
if (As[0].indptr->dtype.bits == 64) {
newAs.reserve(n);
for (int i = 0; i < n; ++i)
newAs.emplace_back(
As[i].num_rows, As[i].num_cols, AsNumBits(As[i].indptr, 32),
AsNumBits(As[i].indices, 32), AsNumBits(As[i].data, 32));
cast = true;
}
const std::vector<CSRMatrix> &As_ref = cast ? newAs : As;
// Reorder weights if A[i] has edge IDs
std::vector<NDArray> A_weights_reordered(n);
for (int i = 0; i < n; ++i) {
if (CSRHasData(As[i]))
A_weights_reordered[i] = IndexSelect(A_weights[i], As[i].data);
else
A_weights_reordered[i] = A_weights[i];
}
// Loop and sum
auto result = std::make_pair(
CSRMatrix(
As_ref[0].num_rows, As_ref[0].num_cols,
As_ref[0].indptr, As_ref[0].indices),
A_weights_reordered[0]); // Weights already reordered so we don't need As[0].data
for (int64_t i = 1; i < n; ++i)
result = cusparse::CusparseCsrgeam2<DType, int32_t>(
result.first, result.second, As_ref[i], A_weights_reordered[i]);
// Cast 32 bit indices back to 64 bit if necessary
if (cast) {
CSRMatrix C = result.first;
return {
CSRMatrix(C.num_rows, C.num_cols, AsNumBits(C.indptr, 64), AsNumBits(C.indices, 64)),
result.second};
} else {
return result;
}
}
template std::pair<CSRMatrix, NDArray> CSRSum<kDLGPU, int32_t, float>(
const std::vector<CSRMatrix>&, const std::vector<NDArray>&);
template std::pair<CSRMatrix, NDArray> CSRSum<kDLGPU, int64_t, float>(
const std::vector<CSRMatrix>&, const std::vector<NDArray>&);
template std::pair<CSRMatrix, NDArray> CSRSum<kDLGPU, int32_t, double>(
const std::vector<CSRMatrix>&, const std::vector<NDArray>&);
template std::pair<CSRMatrix, NDArray> CSRSum<kDLGPU, int64_t, double>(
const std::vector<CSRMatrix>&, const std::vector<NDArray>&);
} // namespace aten
} // namespace dgl
|
23ab5d2440c47e68416afed40cad6471bfb47554.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<math.h>
#include<complex.h>
#include<hipfft.h>
#define Nx 10
#define BATCH 1
typedef double complex cplx;
int main()
{
hipfftHandle plan;
hipDoubleComplex *data;
hipDoubleComplex dataH[Nx];
for(int i=0;i<Nx;i++)
{
dataH[i].x=i;
dataH[i].y=0.0;
}
hipMalloc((void**)&data,sizeof(hipDoubleComplex)*Nx*BATCH);
if (hipGetLastError() != hipSuccess)
{
fprintf(stderr, "Cuda error: Failed to allocate\n");
return 0;
}
if (hipfftPlan1d(&plan, Nx, HIPFFT_Z2Z,BATCH) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: Plan creation failed");
return 0;
}
size_t sizeData= Nx*sizeof(hipDoubleComplex);
hipMemcpy(data,dataH,sizeData,hipMemcpyHostToDevice);
if (hipfftExecZ2Z(plan, data, data, HIPFFT_FORWARD) != HIPFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
return 0;
}
hipMemcpy(dataH,data,sizeData,hipMemcpyDeviceToHost);
printf("\n");
for(int i=0;i<Nx;i++)
printf("%f:%f ",dataH[i].x,dataH[i].y);
return 0;
}
| 23ab5d2440c47e68416afed40cad6471bfb47554.cu | #include<stdio.h>
#include<math.h>
#include<complex.h>
#include<cufft.h>
#define Nx 10
#define BATCH 1
typedef double complex cplx;
int main()
{
cufftHandle plan;
cuDoubleComplex *data;
cuDoubleComplex dataH[Nx];
for(int i=0;i<Nx;i++)
{
dataH[i].x=i;
dataH[i].y=0.0;
}
cudaMalloc((void**)&data,sizeof(cuDoubleComplex)*Nx*BATCH);
if (cudaGetLastError() != cudaSuccess)
{
fprintf(stderr, "Cuda error: Failed to allocate\n");
return 0;
}
if (cufftPlan1d(&plan, Nx, CUFFT_Z2Z,BATCH) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: Plan creation failed");
return 0;
}
size_t sizeData= Nx*sizeof(cuDoubleComplex);
cudaMemcpy(data,dataH,sizeData,cudaMemcpyHostToDevice);
if (cufftExecZ2Z(plan, data, data, CUFFT_FORWARD) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecC2C Forward failed");
return 0;
}
cudaMemcpy(dataH,data,sizeData,cudaMemcpyDeviceToHost);
printf("\n");
for(int i=0;i<Nx;i++)
printf("%f:%f ",dataH[i].x,dataH[i].y);
return 0;
}
|
a5eeae5d355698e671146424fe437782dbbe87c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <time.h>
const int listLength = 753411;
__global__ void squareKernel(float* d_in, float *d_out, int threads_num) {
const unsigned int lid = threadIdx.x; // local id inside a block
const unsigned int gid = blockIdx.x*blockDim.x + lid; // global id
if (gid < threads_num){
d_out[gid] = powf((d_in[gid]/(d_in[gid]-2.3)),3);
}// do computation
}
int timeval_subtract(struct timeval* result,struct timeval* t2,struct timeval* t1) {
unsigned int resolution=1000000;
long int diff = (t2->tv_usec + resolution * t2->tv_sec) -(t1->tv_usec + resolution * t1->tv_sec) ;
result->tv_sec = diff / resolution;
result->tv_usec = diff % resolution;
return (diff<0);
}
int main(int argc, char** arigv) {
unsigned int num_threads = listLength;
unsigned int mem_size = num_threads*sizeof(float);
unsigned int block_size = 256;
unsigned int num_blocks = ((num_threads + (block_size-1)) / block_size);
unsigned long int elapsed1;
unsigned long int elapsed2;
struct timeval t_start, t_end, t_diff;
float* h_in = (float*)malloc(mem_size);
float* h_out = (float*)malloc(mem_size);
float tmpList[listLength];
float epsilon = 1*1e-4;
for(unsigned int i = 0; i<num_threads; ++i){
h_in[i] = (float)i;
}
//Serial mapping
gettimeofday(&t_start, NULL);
for(int i = 0; i < listLength; ++i){
tmpList[i] = powf((h_in[i]/(h_in[i]-2.3)),3.0);
}
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed1 = t_diff.tv_sec*1e6+t_diff.tv_usec;
printf("Serial Mapping took %d microseconds (%.2fms)\n",elapsed1,elapsed1/1000.0);
//Parallel Mapping
float* d_in;
float* d_out;
hipMalloc((void**)&d_in, mem_size);
hipMalloc((void**)&d_out, mem_size);
hipMemcpy(d_in, h_in, mem_size, hipMemcpyHostToDevice);
gettimeofday(&t_start, NULL);
hipLaunchKernelGGL(( squareKernel), dim3(num_blocks), dim3(block_size), 0, 0, d_in, d_out, num_threads);
hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed2 = t_diff.tv_sec*1e6+t_diff.tv_usec;
printf("Parallel mapping took %d microseconds (%.2fms)\n",elapsed2,elapsed2/1000.0);
hipMemcpy(h_out, d_out, sizeof(float)*num_threads, hipMemcpyDeviceToHost);
unsigned int mep = 1;
for(unsigned int i=0; i<num_threads; ++i){
if(abs(h_out[i] - tmpList[i]) > epsilon){
printf("Something failed: at index");
}
}
if(mep == 1){
std::cout<<"Valid\n";
}
if(mep == 0){
std::cout<<"Invalid\n";
}
// clean-up memory
free(h_in);
free(h_out);
hipFree(d_in);
hipFree(d_out);
}
| a5eeae5d355698e671146424fe437782dbbe87c0.cu | #include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <time.h>
const int listLength = 753411;
__global__ void squareKernel(float* d_in, float *d_out, int threads_num) {
const unsigned int lid = threadIdx.x; // local id inside a block
const unsigned int gid = blockIdx.x*blockDim.x + lid; // global id
if (gid < threads_num){
d_out[gid] = powf((d_in[gid]/(d_in[gid]-2.3)),3);
}// do computation
}
int timeval_subtract(struct timeval* result,struct timeval* t2,struct timeval* t1) {
unsigned int resolution=1000000;
long int diff = (t2->tv_usec + resolution * t2->tv_sec) -(t1->tv_usec + resolution * t1->tv_sec) ;
result->tv_sec = diff / resolution;
result->tv_usec = diff % resolution;
return (diff<0);
}
int main(int argc, char** arigv) {
unsigned int num_threads = listLength;
unsigned int mem_size = num_threads*sizeof(float);
unsigned int block_size = 256;
unsigned int num_blocks = ((num_threads + (block_size-1)) / block_size);
unsigned long int elapsed1;
unsigned long int elapsed2;
struct timeval t_start, t_end, t_diff;
float* h_in = (float*)malloc(mem_size);
float* h_out = (float*)malloc(mem_size);
float tmpList[listLength];
float epsilon = 1*1e-4;
for(unsigned int i = 0; i<num_threads; ++i){
h_in[i] = (float)i;
}
//Serial mapping
gettimeofday(&t_start, NULL);
for(int i = 0; i < listLength; ++i){
tmpList[i] = powf((h_in[i]/(h_in[i]-2.3)),3.0);
}
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed1 = t_diff.tv_sec*1e6+t_diff.tv_usec;
printf("Serial Mapping took %d microseconds (%.2fms)\n",elapsed1,elapsed1/1000.0);
//Parallel Mapping
float* d_in;
float* d_out;
cudaMalloc((void**)&d_in, mem_size);
cudaMalloc((void**)&d_out, mem_size);
cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
gettimeofday(&t_start, NULL);
squareKernel<<< num_blocks, block_size>>>(d_in, d_out, num_threads);
cudaThreadSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed2 = t_diff.tv_sec*1e6+t_diff.tv_usec;
printf("Parallel mapping took %d microseconds (%.2fms)\n",elapsed2,elapsed2/1000.0);
cudaMemcpy(h_out, d_out, sizeof(float)*num_threads, cudaMemcpyDeviceToHost);
unsigned int mep = 1;
for(unsigned int i=0; i<num_threads; ++i){
if(abs(h_out[i] - tmpList[i]) > epsilon){
printf("Something failed: at index");
}
}
if(mep == 1){
std::cout<<"Valid\n";
}
if(mep == 0){
std::cout<<"Invalid\n";
}
// clean-up memory
free(h_in);
free(h_out);
cudaFree(d_in);
cudaFree(d_out);
}
|
039a475c86d0474b827c949a7a5fa21dc1e07aa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zgemm_fermi.cu normal z -> s, Fri Jul 18 17:34:13 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
gemm_stencil.cu defines the GPU kernel. It gets included
multiple times, once for each transpose version.
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include <assert.h>
#define PRECISION_s
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "sgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**T,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
TRANSA CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**T.
@param[in]
TRANSB CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**T.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( d_A ) and of the matrix d_C. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( d_B ) and the number of columns of the matrix d_C. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( d_A ) and the number of rows of the matrix op( d_B ). K must
be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_A REAL array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = MagmaNoTrans, and is m otherwise.
Before entry with TRANSA = MagmaNoTrans, the leading m by k
part of the array d_A must contain the matrix d_A, otherwise
the leading k by m part of the array d_A must contain the
matrix d_A.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
d_B REAL array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = MagmaNoTrans, and is k otherwise.
Before entry with TRANSB = MagmaNoTrans, the leading k by n
part of the array d_B must contain the matrix d_B, otherwise
the leading n by k part of the array d_B must contain the
matrix d_B.
@param[in]
ldb INTEGER.
On entry, LDB specifies the first dimension of d_B as declared
in the calling (sub) program. When TRANSB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then d_C need not be set on input.
@param[in,out]
d_C REAL array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array d_C must
contain the matrix d_C, except when beta is zero, in which
case d_C need not be set on entry.
On exit, the array d_C is overwritten by the m by n matrix
( alpha*op( d_A )*op( d_B ) + beta*d_C ).
@param[in]
ldc INTEGER.
On entry, LDC specifies the first dimension of d_C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_sblas3
********************************************************************/
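// Typical call (an illustrative sketch only; dA, dB and dC are assumed to be device
// arrays already allocated and populated, with leading dimensions ldda, lddb and lddc
// satisfying the constraints documented above):
//
//     magmablas_sgemm( MagmaNoTrans, MagmaNoTrans, m, n, k,
//                      alpha, dA, ldda, dB, lddb, beta, dC, lddc );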
extern "C" void
magmablas_sgemm(
magma_trans_t TRANSA, magma_trans_t TRANSB, magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
const float *d_A, magma_int_t lda,
const float *d_B, magma_int_t ldb,
float beta,
float *d_C, magma_int_t ldc )
{
magma_int_t info = 0;
if ( TRANSA != MagmaNoTrans && TRANSA != MagmaTrans && TRANSA != Magma_ConjTrans )
info = -1;
else if ( TRANSB != MagmaNoTrans && TRANSB != MagmaTrans && TRANSB != Magma_ConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( TRANSA == MagmaNoTrans ? lda < m : lda < k )
info = -8;
else if ( TRANSB == MagmaNoTrans ? ldb < k : ldb < n )
info = -10;
else if ( ldc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
magma_sgemm(
TRANSA, TRANSB,
m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#else
magmablas_sgemm_tesla(
TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( TRANSA == MagmaTrans )
TransA = 1;
else if ( TRANSA == MagmaNoTrans )
TransA = 0;
if ( TRANSB == MagmaTrans )
TransB = 1;
else if ( TRANSB == MagmaNoTrans )
TransB = 0;
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_sgemm( TRANSA, TRANSB, m, n, k, alpha,
d_A, lda, d_B, ldb,
beta, d_C, ldc );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = hipFilterModePoint;
tex_ref_A.addressMode[0] = hipAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = hipFilterModePoint;
tex_ref_B.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_A, d_A, sizeA*sizeof(float));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
err = hipBindTexture(&offsetB, tex_ref_B, d_B, sizeB*sizeof(float));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err );
hipUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(d_A[0]);
offsetB = offsetB/sizeof(d_B[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_nn + 1,
(n - 1)/BLK_N_nn + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_tt + 1,
(n - 1)/BLK_N_tt + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_tc + 1,
(n - 1)/BLK_N_tc + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_ct + 1,
(n - 1)/BLK_N_ct + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_ct), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_cc + 1,
(n - 1)/BLK_N_cc + 1 );
hipLaunchKernelGGL(( sgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
hipUnbindTexture( tex_ref_A );
hipUnbindTexture( tex_ref_B );
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| 039a475c86d0474b827c949a7a5fa21dc1e07aa2.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zgemm_fermi.cu normal z -> s, Fri Jul 18 17:34:13 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
gemm_stencil.cu defines the GPU kernel. It gets included
multiple times, once for each transpose version.
*/
#include "common_magma.h"
#include "commonblas_s.h"
#include <assert.h>
#define PRECISION_s
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "sgemm_fermi_kernels.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGEMM performs one of the matrix-matrix operations
C = alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X**T or op( X ) = X**T,
alpha and beta are scalars, and A, B and C are matrices, with
op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
----------
@param[in]
TRANSA CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
- = 'N': op( A ) = A.
- = 'T': op( A ) = A**T.
- = 'C': op( A ) = A**T.
@param[in]
TRANSB CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
- = 'N': op( B ) = B.
- = 'T': op( B ) = B**T.
- = 'C': op( B ) = B**T.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix
op( d_A ) and of the matrix d_C. M must be at least zero.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix
op( d_B ) and the number of columns of the matrix d_C. N must be
at least zero.
@param[in]
k INTEGER.
On entry, K specifies the number of columns of the matrix
op( d_A ) and the number of rows of the matrix op( d_B ). K must
be at least zero.
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
d_A REAL array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = MagmaNoTrans, and is m otherwise.
Before entry with TRANSA = MagmaNoTrans, the leading m by k
part of the array d_A must contain the matrix d_A, otherwise
the leading k by m part of the array d_A must contain the
matrix d_A.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = MagmaNoTrans then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
@param[in]
d_B REAL array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = MagmaNoTrans, and is k otherwise.
Before entry with TRANSB = MagmaNoTrans, the leading k by n
part of the array d_B must contain the matrix d_B, otherwise
the leading n by k part of the array d_B must contain the
matrix d_B.
@param[in]
ldb INTEGER.
On entry, LDB specifies the first dimension of d_B as declared
in the calling (sub) program. When TRANSB = MagmaNoTrans then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then d_C need not be set on input.
@param[in,out]
d_C REAL array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array d_C must
contain the matrix d_C, except when beta is zero, in which
case d_C need not be set on entry.
On exit, the array d_C is overwritten by the m by n matrix
( alpha*op( d_A )*op( d_B ) + beta*d_C ).
@param[in]
ldc INTEGER.
On entry, LDC specifies the first dimension of d_C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_sgemm(
magma_trans_t TRANSA, magma_trans_t TRANSB, magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
const float *d_A, magma_int_t lda,
const float *d_B, magma_int_t ldb,
float beta,
float *d_C, magma_int_t ldc )
{
magma_int_t info = 0;
if ( TRANSA != MagmaNoTrans && TRANSA != MagmaTrans && TRANSA != Magma_ConjTrans )
info = -1;
else if ( TRANSB != MagmaNoTrans && TRANSB != MagmaTrans && TRANSB != Magma_ConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( TRANSA == MagmaNoTrans ? lda < m : lda < k )
info = -8;
else if ( TRANSB == MagmaNoTrans ? ldb < k : ldb < n )
info = -10;
else if ( ldc < m )
info = -13;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sd] precisions, cublas for [zc] precisions.
#if defined(PRECISION_z) || defined(PRECISION_c)
magma_sgemm(
TRANSA, TRANSB,
m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#else
magmablas_sgemm_tesla(
TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
#endif
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( m <= 0 || n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 2, TransB = 2;
if ( TRANSA == MagmaTrans )
TransA = 1;
else if ( TRANSA == MagmaNoTrans )
TransA = 0;
if ( TRANSB == MagmaTrans )
TransB = 1;
else if ( TRANSB == MagmaNoTrans )
TransB = 0;
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ||
sizeB >= CUBLAS_MAX_1DBUF_SIZE )
{
magma_sgemm( TRANSA, TRANSB, m, n, k, alpha,
d_A, lda, d_B, ldb,
beta, d_C, ldc );
return;
}
#ifdef TEXTURE_1D
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = cudaFilterModePoint;
tex_ref_A.addressMode[0] = cudaAddressModeClamp;
tex_ref_B.normalized = false;
tex_ref_B.filterMode = cudaFilterModePoint;
tex_ref_B.addressMode[0] = cudaAddressModeClamp;
// Bind A and B to texture references
cudaError_t err;
err = cudaBindTexture(&offsetA, tex_ref_A, d_A, sizeA*sizeof(float));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
return;
}
err = cudaBindTexture(&offsetB, tex_ref_B, d_B, sizeB*sizeof(float));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err );
cudaUnbindTexture( tex_ref_A );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(d_A[0]);
offsetB = offsetB/sizeof(d_B[0]);
if ( TransA == 0 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_nn + 1,
(n - 1)/BLK_N_nn + 1 );
sgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 );
sgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 );
sgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 );
sgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_tt + 1,
(n - 1)/BLK_N_tt + 1 );
sgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 1 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_tc + 1,
(n - 1)/BLK_N_tc + 1 );
sgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (m - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 );
sgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 1 ) {
dim3 dimGrid( (m - 1)/BLK_M_ct + 1,
(n - 1)/BLK_N_ct + 1 );
sgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
else if ( TransA == 2 && TransB == 2 ) {
dim3 dimGrid( (m - 1)/BLK_M_cc + 1,
(n - 1)/BLK_N_cc + 1 );
sgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, magma_stream >>>(
m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta,
(int)offsetA, (int)offsetB );
}
cudaUnbindTexture( tex_ref_A );
cudaUnbindTexture( tex_ref_B );
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
18ca93c7ed80c72197872836d8967603a923b217.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <stdio.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <cfloat>
#include <utility>
// #define USE_EXAMPLE /* uncomment to use fixed example instead of generating vectors */
// #define PRINT_ANSWERS /* uncomment to print out solution */
const int threads_per_block = 256;
// Forward function declarations
/*
Task: Perform a 2D convolution on an NxN matrix A_in laid out in memory
with contiguous rows and a KxK filter F. K must be odd. Put
the result in A_out.
With this layout, A row i column j is A[i * N + j]; and
    F row i column j is F[i * K + j].
    When elements outside A are required, handle this by "extending"
    the edge (see also the description in the Wikipedia article), substituting
    the closest value within A. For example, if accessing row -1, col 3 of A
    is required, instead substitute row 0, col 3 of A (if A's rows are numbered
    from 0).
This means that for any given element in A, select a KxK neighborhood
around it, and multiply it element-by-element with the KxK filter,
then sum the results and put the sum in the corresponding element of A_out.
For example, if K = 3, then this means that
// A_out(row i, col j) =
// F(row 0, col 0) * A_in(row MAX(i - 1, 0), col j - 1) + ...
A_out[i * N + j] =
F[0 * K + 0] * A_in[MAX(i - 1, 0) * N + MAX(j - 1, 0)] +
F[0 * K + 1] * A_in[MAX(i - 1, 0) * N + j] +
F[0 * K + 2] * A_in[MAX(i - 1, 0) * N + MIN(j + 1, N-1)] +
F[1 * K + 0] * A_in[i * N + MAX(j - 1, 0)] +
F[1 * K + 1] * A_in[i * N + j] +
    F[1 * K + 2] * A_in[i * N + MIN(j + 1, N-1)] +
F[2 * K + 0] * A_in[MIN(i + 1, N-1) * N + MAX(j - 1, 0)] +
F[2 * K + 1] * A_in[MIN(i + 1, N-1) * N + j] +
F[2 * K + 2] * A_in[MIN(i + 1, N-1) * N + MIN(j + 1, N-1)];
See also:
- CPU_convolve() which implements this below.
- https://en.wikipedia.org/wiki/Kernel_(image_processing)
- https://docs.gimp.org/en/plug-in-convmatrix.html
*/
void GPU_convolve(float *A_in, float *A_out, int N, float *F, int K, int kernel_code, float *kernel_time, float *transfer_time);
void CPU_convolve(float *A_in, float *A_out, int N, float *F, int K);
float *get_random_vector(int N);
float *get_increasing_vector(int N);
float usToSec(long long time);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void printMatrix(float *X, int N, int M); // utility function you can use
void die(const char *message);
void checkError();
// Main program
int main(int argc, char **argv) {
//default kernel
int kernel_code = 1;
// Parse vector length and kernel options
int N, K;
#ifdef USE_EXAMPLE
if(argc == 1) {
} else if (argc == 3 && !strcmp(argv[1], "-k")) {
kernel_code = atoi(argv[2]);
printf("KERNEL_CODE %d\n", kernel_code);
} else {
die("USAGE: ./2d_convolve -k <kernel_code> # uses hardcoded example");
}
#else
if(argc == 3) {
N = atoi(argv[1]); // user-specified value
K = atoi(argv[2]); // user-specified value
} else if (argc == 5 && !strcmp(argv[3], "-k")) {
N = atoi(argv[1]); // user-specified value
K = atoi(argv[2]); // user-specified value
kernel_code = atoi(argv[4]);
printf("KERNEL_CODE %d\n", kernel_code);
} else {
die("USAGE: ./2d_convolve <N> <K> -k <kernel_code> # image is NxN, filter is KxK");
}
#endif
// Seed the random generator (use a constant here for repeatable results)
srand(10);
// Generate random matrices
long long vector_start_time = start_timer();
#ifdef USE_EXAMPLE
float A_in[25] = {
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12, 13, 14, 15,
16, 17, 18, 19, 20,
21, 22, 23, 24, 25
};
N = 5;
K = 3;
float F[9] = {
1, 1, 0,
0, 0, 0,
0, 0, -1,
};
#else
float *A_in = get_random_vector(N * N);
float *F = get_random_vector(K * K);
for (int i = 0; i < K * K; ++i) {
F[i] /= K * K;
}
#endif
stop_timer(vector_start_time, "Vector generation");
float *A_out_GPU;
float *A_out_CPU;
hipHostMalloc((void **) &A_out_GPU, N * N * sizeof(float));
hipHostMalloc((void **) &A_out_CPU, N * N * sizeof(float));
memset(A_out_CPU, 0, N * N * sizeof(float));
memset(A_out_GPU, 0, N * N * sizeof(float));
int num_blocks = (int) ((float) (N + threads_per_block - 1) / (float) threads_per_block);
int max_blocks_per_dimension = 65535;
int num_blocks_y = (int) ((float) (num_blocks + max_blocks_per_dimension - 1) / (float) max_blocks_per_dimension);
int num_blocks_x = (int) ((float) (num_blocks + num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
// Compute the max on the GPU
float GPU_kernel_time = INFINITY;
float transfer_time = INFINITY;
long long GPU_start_time = start_timer();
GPU_convolve(A_in, A_out_GPU, N, F, K, kernel_code, &GPU_kernel_time, &transfer_time);
long long GPU_time = stop_timer(GPU_start_time, "\n Total");
printf("%f\n", GPU_kernel_time);
// Compute the max on the CPU
long long CPU_start_time = start_timer();
CPU_convolve(A_in, A_out_CPU, N, F, K);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU");
#ifndef USE_EXAMPLE
// Free matrices
hipFree(A_in);
hipFree(F);
#endif
// Compute the speedup or slowdown
//// Not including data transfer
if (GPU_kernel_time > usToSec(CPU_time)) printf("\nCPU outperformed GPU kernel by %.2fx\n", (float) (GPU_kernel_time) / usToSec(CPU_time));
else printf("\nGPU kernel outperformed CPU by %.2fx\n", (float) usToSec(CPU_time) / (float) GPU_kernel_time);
//// Including data transfer
if (GPU_time > CPU_time) printf("\nCPU outperformed GPU total runtime (including data transfer) by %.2fx\n", (float) GPU_time / (float) CPU_time);
else printf("\nGPU total runtime (including data transfer) outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);
#ifdef PRINT_ANSWERS
printf("CPU result:\n");
printMatrix(A_out_CPU, N, N);
printf("GPU result:\n");
printMatrix(A_out_GPU, N, N);
#endif
// Check the correctness of the GPU results
float max_delta = 0.0f;
for (int i = 0; i < N * N; ++i) {
float cpu = A_out_CPU[i];
float gpu = A_out_GPU[i];
float delta = fabs(gpu - cpu);
if (delta > max_delta) {
/* printf("%f/%f/%f\n", gpu, cpu, real); */
max_delta = delta;
}
}
hipFree(A_out_CPU);
hipFree(A_out_GPU);
/* This should be lenient enough to allow additions/substractions to occur in a different order */
int wrong = max_delta > 1e-6 * 2 * K;
// Report the correctness results
if(wrong) printf("GPU output did not match CPU output (max error %.2f%%)\n", max_delta * 100.);
}
static int clamp(int x, int low, int high) {
if (x < low)
return low;
else if (x > high)
return high;
else
return x;
}
// parallel
/*__global__ void vector_max_kernelX(float *A_in, float *A_out, int N1, float *F, int K1, int offset1) {
// Determine the "flattened" block id and thread id
int block_id = blockIdx.x + gridDim.x * blockIdx.y;
int thread_id = blockDim.x * block_id + threadIdx.x;
int thread_i;
int i = thread_id;
__shared__ int N, K, offset;
K=K1;
N=N1;
offset = offset1;
int a,b;
__shared__ int min_offset, max_offset;
min_offset = -(K-1) / 2;
max_offset = (K-1) / 2;
int ii = thread_i/K - (K * K / 2);
int jj = thread_i%K - (K * K / 2);
float *F_center = &F[ K * K / 2 ];
float result = 0.0;
if (i+ii<0) {
if (j+jj<0)
result += A_in[0] * F_center[ii * K +jj];
else if (j+jj>N-1)
result += A_in[N-1] * F_center[ii * K +jj];
else
result += A_in[j+jj] * F_center[ii * K +jj];
}
else if (i+ii> N-1) {
if (j+jj<0)
result += A_in[(N-1)*N] * F_center[ii * K +jj];
else if(j+jj>N-1)
result += A_in[(N-1)*N+N-1] * F_center[ii * K +jj];
else
result += A_in[(N-1)*N+j+jj] * F_center[ii * K +jj];
}
else {
if (j+jj<0)
result += A_in[i+ii] * F_center[ii * K +jj];
else if(j+jj>N-1)
result += A_in[(i+ii)*N+N-1] * F_center[ii * K +jj];
else
result += A_in[(i+ii)*N+j+jj] * F_center[ii * K +jj];
}
A_out[i * N + j ] += result;
}*/
__global__ void vector_max_kernel(float *A_in, float *A_out, int N1, float *F, int K1) {
// Determine the "flattened" block id and thread id
int block_id = blockIdx.x + gridDim.x * blockIdx.y;
    int thread_id = blockDim.x * block_id + threadIdx.x;
    if (thread_id >= N1 * N1) return;  // surplus threads in the last block have no output element
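    // The 2D grid is flattened into one linear thread index; each thread computes a
    // single output element at (row, col) = (thread_id / N, thread_id % N).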
__shared__ int N, K;
/* __shared__ float F_s[256],A_in_s[256];
A_in_s[thread_id_lo] = A_in[thread_id];
F_s[thread_id_lo] = F[thread_id];
__syncthreads();*/
K=K1;
N=N1;
int a,b;
__shared__ int min_offset, max_offset;
min_offset = -(K-1) / 2;
max_offset = (K-1) / 2;
float *F_center = &F[ K * K / 2 ];
int i,j;
i=thread_id/N;
j=thread_id%N;
float result = 0.0;
for (int ii = min_offset; ii <= max_offset; ++ii) {
for (int jj = min_offset; jj <= max_offset; ++jj) {
if (i+ii < 0)
a=0;
else if (i+ii > N-1)
a=N-1;
else
a=i+ii;
if (j+jj < 0)
b=0;
else if (j+jj > N-1)
b=N-1;
else
b=j+jj;
result += A_in[a*N + b] *
F_center[ii * K +jj];
}
}
A_out[i * N + j ] = result;
}
void GPU_convolve(float *A_in, float *A_out, int N, float *F, int K, int kernel_code, float *kernel_runtime, float *transfer_runtime) {
    // IMPLEMENT YOUR CONVOLUTION AND TIMING CODE HERE
long long transfer_time = 0;
long long kernel_time = 0;
int A_size = N * N * sizeof(float);
int F_size = K * K * sizeof(float);
/* // Allocate CPU memory for the result
float *out_CPU;
hipHostMalloc((void **) &out_CPU, A_size * sizeof(float));
if (out_CPU == NULL) die("Error allocating CPU memory");
*/
// Allocate GPU memory for the inputs and the result
long long memory_start_time = start_timer();
float *A_GPU, *out_GPU, *F_GPU;
if (hipMalloc((void **) &A_GPU, A_size) != hipSuccess) die("Error allocating GPU memory");
if (hipMalloc((void **) &out_GPU, A_size) != hipSuccess) die("Error allocating GPU memory");
if (hipMalloc((void **) &F_GPU, F_size) != hipSuccess) die("Error allocating GPU memory");
// Transfer the input vectors to GPU memory
hipMemcpy(A_GPU, A_in, A_size, hipMemcpyHostToDevice);
hipMemcpy(F_GPU, F, F_size, hipMemcpyHostToDevice);
hipDeviceSynchronize(); // this is only needed for timing purposes
transfer_time += stop_timer(memory_start_time, "\nGPU:\t Transfer to GPU");
// Determine the number of thread blocks in the x- and y-dimension
int num_blocks = (int) ((float) (N*N + threads_per_block - 1) / (float) threads_per_block);
int max_blocks_per_dimension = 65535;
int num_blocks_y = (int) ((float) (num_blocks + max_blocks_per_dimension - 1) / (float) max_blocks_per_dimension);
int num_blocks_x = (int) ((float) (num_blocks + num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
// Execute the kernel to compute the vector sum on the GPU
long long kernel_start_time;
kernel_start_time = start_timer();
hipLaunchKernelGGL(( vector_max_kernel) , dim3(grid_size) , dim3(threads_per_block) , 0, 0, A_GPU, out_GPU, N, F_GPU, K);
hipDeviceSynchronize(); // this is only needed for timing purposes
kernel_time += stop_timer(kernel_start_time, "\t Kernel execution");
checkError();
// Transfer the result from the GPU to the CPU
memory_start_time = start_timer();
// copy the result matrix back
hipMemcpy(A_out, out_GPU, A_size, hipMemcpyDeviceToHost);
checkError();
hipDeviceSynchronize(); // this is only needed for timing purposes
transfer_time += stop_timer(memory_start_time, "\tTransfer from GPU");
// Free the GPU memory
hipFree(A_GPU);
hipFree(out_GPU);
hipFree(F_GPU);
// fill input pointers with ms runtimes
*kernel_runtime = usToSec(kernel_time);
*transfer_runtime = usToSec(transfer_time);
//return a single statistic
// return 0;
}
void CPU_convolve(float *A_in, float *A_out, int N, float *F, int K) {
int min_offset = -(K-1) / 2;
int max_offset = (K-1) / 2;
float *F_center = &F[ K * K / 2 ];
// If K = 5, F_center points at (row 2, col 2), so F_center[1] is (row 2, col 3);
// F_center[-K] is (row 1, col 2), F_center[K + 1] is (row 3, col 3), etc.
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
float result = 0.0;
for (int ii = min_offset; ii <= max_offset; ++ii) {
for (int jj = min_offset; jj <= max_offset; ++jj) {
result += A_in[clamp(i+ii, 0, N-1)*N + clamp(j+jj, 0, N-1)] *
F_center[ii * K + jj];
}
}
A_out[i * N + j ] = result;
}
}
}
// Returns a randomized vector containing N elements
// This version generates a vector containing values in the range [0,2)
float *get_random_vector(int N) {
if (N < 1) die("Number of elements must be greater than zero");
// Allocate memory for the vector
float *V;
hipHostMalloc((void **) &V, N * sizeof(float));
if (V == NULL) die("Error allocating CPU memory");
// Populate the vector with random numbers
for (int i = 0; i < N; i++) V[i] = rand() * 2.0f / RAND_MAX;
// Return the randomized vector
return V;
}
void printMatrix(float *X, int N, int M) {
for (int i = 0; i < N; ++i) {
printf("row %d: ", i);
for (int j = 0; j < M; ++j) {
printf("%f ", X[i * M + j]);
}
printf("\n");
}
}
void checkError() {
// Check for kernel errors
hipError_t error = hipGetLastError();
if (error) {
char message[256];
sprintf(message, "CUDA error: %s", hipGetErrorString(error));
die(message);
}
}
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// converts a long long microsecond (us) value to float seconds
float usToSec(long long time) {
return ((float)time)/(1000000);
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, const char *name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
float elapsed = usToSec(end_time - start_time);
printf("%s: %.5f sec\n", name, elapsed);
return end_time - start_time;
}
// Prints the specified message and quits
void die(const char *message) {
printf("%s\n", message);
exit(1);
}
| 18ca93c7ed80c72197872836d8967603a923b217.cu | #include <assert.h>
#include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
#include <cfloat>
#include <utility>
// #define USE_EXAMPLE /* uncomment to use fixed example instead of generating vectors */
// #define PRINT_ANSWERS /* uncomment to print out solution */
const int threads_per_block = 256;
// Forward function declarations
/*
Task: Perform a 2D convolution on an NxN matrix A_in laid out in memory
with contiguous rows and a KxK filter F. K must be odd. Put
the result in A_out.
With this layout, A row i column j is A[i * N + j]; and
F row i column j is F[i * K + j].
When elements outside A are required apply this by "extending"
the edge (see also description in the Wikipedia article), substituting
the closest value within A. For example, if accessing row -1, col 3 of A
is required, instead substitute row 0, col 3 of A (if As rows numbered
from 0).
This means that for any given element in A, select a KxK neighborhood
around it, and multiply it element-by-element with the KxK filter,
then sum the results and put the sum in the corresponding element of A_out.
For example, if K = 3, then this means that
// A_out(row i, col j) =
// F(row 0, col 0) * A_in(row MAX(i - 1, 0), col j - 1) + ...
A_out[i * N + j] =
F[0 * K + 0] * A_in[MAX(i - 1, 0) * N + MAX(j - 1, 0)] +
F[0 * K + 1] * A_in[MAX(i - 1, 0) * N + j] +
F[0 * K + 2] * A_in[MAX(i - 1, 0) * N + MIN(j + 1, N-1)] +
F[1 * K + 0] * A_in[i * N + MAX(j - 1, 0)] +
F[1 * K + 1] * A_in[i * N + j] +
F[1 * K + 2] * A_in[i * N + MIN(j + 1, N-1)] +
F[2 * K + 0] * A_in[MIN(i + 1, N-1) * N + MAX(j - 1, 0)] +
F[2 * K + 1] * A_in[MIN(i + 1, N-1) * N + j] +
F[2 * K + 2] * A_in[MIN(i + 1, N-1) * N + MIN(j + 1, N-1)];
See also:
- CPU_convolve() which implements this below.
- https://en.wikipedia.org/wiki/Kernel_(image_processing)
- https://docs.gimp.org/en/plug-in-convmatrix.html
*/
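/* Illustrative sketch (not part of the original assignment code): a
   self-contained host helper showing how a single output element could be
   computed under the edge-extension rule described above. The name
   reference_convolve_at and the inlined clamping are assumptions made for
   illustration only; the required interfaces are the ones declared below. */
static inline float reference_convolve_at(const float *A_in, int N,
                                          const float *F, int K,
                                          int i, int j) {
    const float *F_center = &F[K * K / 2]; // middle element of the KxK filter
    int half = (K - 1) / 2;
    float result = 0.0f;
    for (int ii = -half; ii <= half; ++ii) {
        for (int jj = -half; jj <= half; ++jj) {
            int a = i + ii, b = j + jj;
            if (a < 0) a = 0; else if (a > N - 1) a = N - 1; // extend top/bottom edge
            if (b < 0) b = 0; else if (b > N - 1) b = N - 1; // extend left/right edge
            result += A_in[a * N + b] * F_center[ii * K + jj];
        }
    }
    return result;
}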
void GPU_convolve(float *A_in, float *A_out, int N, float *F, int K, int kernel_code, float *kernel_time, float *transfer_time);
void CPU_convolve(float *A_in, float *A_out, int N, float *F, int K);
float *get_random_vector(int N);
float *get_increasing_vector(int N);
float usToSec(long long time);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void printMatrix(float *X, int N, int M); // utility function you can use
void die(const char *message);
void checkError();
// Main program
int main(int argc, char **argv) {
//default kernel
int kernel_code = 1;
// Parse vector length and kernel options
int N, K;
#ifdef USE_EXAMPLE
if(argc == 1) {
} else if (argc == 3 && !strcmp(argv[1], "-k")) {
kernel_code = atoi(argv[2]);
printf("KERNEL_CODE %d\n", kernel_code);
} else {
die("USAGE: ./2d_convolve -k <kernel_code> # uses hardcoded example");
}
#else
if(argc == 3) {
N = atoi(argv[1]); // user-specified value
K = atoi(argv[2]); // user-specified value
} else if (argc == 5 && !strcmp(argv[3], "-k")) {
N = atoi(argv[1]); // user-specified value
K = atoi(argv[2]); // user-specified value
kernel_code = atoi(argv[4]);
printf("KERNEL_CODE %d\n", kernel_code);
} else {
die("USAGE: ./2d_convolve <N> <K> -k <kernel_code> # image is NxN, filter is KxK");
}
#endif
// Seed the random generator (use a constant here for repeatable results)
srand(10);
// Generate random matrices
long long vector_start_time = start_timer();
#ifdef USE_EXAMPLE
float A_in[25] = {
1, 2, 3, 4, 5,
6, 7, 8, 9, 10,
11, 12, 13, 14, 15,
16, 17, 18, 19, 20,
21, 22, 23, 24, 25
};
N = 5;
K = 3;
float F[9] = {
1, 1, 0,
0, 0, 0,
0, 0, -1,
};
#else
float *A_in = get_random_vector(N * N);
float *F = get_random_vector(K * K);
for (int i = 0; i < K * K; ++i) {
F[i] /= K * K;
}
#endif
stop_timer(vector_start_time, "Vector generation");
float *A_out_GPU;
float *A_out_CPU;
cudaMallocHost((void **) &A_out_GPU, N * N * sizeof(float));
cudaMallocHost((void **) &A_out_CPU, N * N * sizeof(float));
memset(A_out_CPU, 0, N * N * sizeof(float));
memset(A_out_GPU, 0, N * N * sizeof(float));
int num_blocks = (int) ((float) (N + threads_per_block - 1) / (float) threads_per_block);
int max_blocks_per_dimension = 65535;
int num_blocks_y = (int) ((float) (num_blocks + max_blocks_per_dimension - 1) / (float) max_blocks_per_dimension);
int num_blocks_x = (int) ((float) (num_blocks + num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
// Compute the max on the GPU
float GPU_kernel_time = INFINITY;
float transfer_time = INFINITY;
long long GPU_start_time = start_timer();
GPU_convolve(A_in, A_out_GPU, N, F, K, kernel_code, &GPU_kernel_time, &transfer_time);
long long GPU_time = stop_timer(GPU_start_time, "\n Total");
printf("%f\n", GPU_kernel_time);
// Compute the max on the CPU
long long CPU_start_time = start_timer();
CPU_convolve(A_in, A_out_CPU, N, F, K);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU");
#ifndef USE_EXAMPLE
// Free matrices
cudaFree(A_in);
cudaFree(F);
#endif
// Compute the speedup or slowdown
//// Not including data transfer
if (GPU_kernel_time > usToSec(CPU_time)) printf("\nCPU outperformed GPU kernel by %.2fx\n", (float) (GPU_kernel_time) / usToSec(CPU_time));
else printf("\nGPU kernel outperformed CPU by %.2fx\n", (float) usToSec(CPU_time) / (float) GPU_kernel_time);
//// Including data transfer
if (GPU_time > CPU_time) printf("\nCPU outperformed GPU total runtime (including data transfer) by %.2fx\n", (float) GPU_time / (float) CPU_time);
else printf("\nGPU total runtime (including data transfer) outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);
#ifdef PRINT_ANSWERS
printf("CPU result:\n");
printMatrix(A_out_CPU, N, N);
printf("GPU result:\n");
printMatrix(A_out_GPU, N, N);
#endif
// Check the correctness of the GPU results
float max_delta = 0.0f;
for (int i = 0; i < N * N; ++i) {
float cpu = A_out_CPU[i];
float gpu = A_out_GPU[i];
float delta = fabs(gpu - cpu);
if (delta > max_delta) {
/* printf("%f/%f/%f\n", gpu, cpu, real); */
max_delta = delta;
}
}
cudaFree(A_out_CPU);
cudaFree(A_out_GPU);
/* This should be lenient enough to allow additions/subtractions to occur in a different order */
int wrong = max_delta > 1e-6 * 2 * K;
// Report the correctness results
if(wrong) printf("GPU output did not match CPU output (max error %.2f%%)\n", max_delta * 100.);
}
static int clamp(int x, int low, int high) {
if (x < low)
return low;
else if (x > high)
return high;
else
return x;
}
// parallel
/*__global__ void vector_max_kernelX(float *A_in, float *A_out, int N1, float *F, int K1, int offset1) {
// Determine the "flattened" block id and thread id
int block_id = blockIdx.x + gridDim.x * blockIdx.y;
int thread_id = blockDim.x * block_id + threadIdx.x;
int thread_i;
int i = thread_id;
__shared__ int N, K, offset;
K=K1;
N=N1;
offset = offset1;
int a,b;
__shared__ int min_offset, max_offset;
min_offset = -(K-1) / 2;
max_offset = (K-1) / 2;
int ii = thread_i/K - (K * K / 2);
int jj = thread_i%K - (K * K / 2);
float *F_center = &F[ K * K / 2 ];
float result = 0.0;
if (i+ii<0) {
if (j+jj<0)
result += A_in[0] * F_center[ii * K +jj];
else if (j+jj>N-1)
result += A_in[N-1] * F_center[ii * K +jj];
else
result += A_in[j+jj] * F_center[ii * K +jj];
}
else if (i+ii> N-1) {
if (j+jj<0)
result += A_in[(N-1)*N] * F_center[ii * K +jj];
else if(j+jj>N-1)
result += A_in[(N-1)*N+N-1] * F_center[ii * K +jj];
else
result += A_in[(N-1)*N+j+jj] * F_center[ii * K +jj];
}
else {
if (j+jj<0)
result += A_in[i+ii] * F_center[ii * K +jj];
else if(j+jj>N-1)
result += A_in[(i+ii)*N+N-1] * F_center[ii * K +jj];
else
result += A_in[(i+ii)*N+j+jj] * F_center[ii * K +jj];
}
A_out[i * N + j ] += result;
}*/
__global__ void vector_max_kernel(float *A_in, float *A_out, int N1, float *F, int K1) {
// Determine the "flattened" block id and thread id
int block_id = blockIdx.x + gridDim.x * blockIdx.y;
int thread_id = blockDim.x * block_id + threadIdx.x;
if (thread_id >= N1 * N1) return; // guard: the last block may contain extra threads that would index past the N*N image
__shared__ int N, K;
/* __shared__ float F_s[256],A_in_s[256];
A_in_s[thread_id_lo] = A_in[thread_id];
F_s[thread_id_lo] = F[thread_id];
__syncthreads();*/
K=K1;
N=N1;
int a,b;
__shared__ int min_offset, max_offset;
min_offset = -(K-1) / 2;
max_offset = (K-1) / 2;
float *F_center = &F[ K * K / 2 ];
int i,j;
i=thread_id/N;
j=thread_id%N;
float result = 0.0;
for (int ii = min_offset; ii <= max_offset; ++ii) {
for (int jj = min_offset; jj <= max_offset; ++jj) {
if (i+ii < 0)
a=0;
else if (i+ii > N-1)
a=N-1;
else
a=i+ii;
if (j+jj < 0)
b=0;
else if (j+jj > N-1)
b=N-1;
else
b=j+jj;
result += A_in[a*N + b] *
F_center[ii * K +jj];
}
}
A_out[i * N + j ] = result;
}
void GPU_convolve(float *A_in, float *A_out, int N, float *F, int K, int kernel_code, float *kernel_runtime, float *transfer_runtime) {
// IMPLEMENT YOUR CONVOLUTION AND TIMING CODE HERE
long long transfer_time = 0;
long long kernel_time = 0;
int A_size = N * N * sizeof(float);
int F_size = K * K * sizeof(float);
/* // Allocate CPU memory for the result
float *out_CPU;
cudaMallocHost((void **) &out_CPU, A_size * sizeof(float));
if (out_CPU == NULL) die("Error allocating CPU memory");
*/
// Allocate GPU memory for the inputs and the result
long long memory_start_time = start_timer();
float *A_GPU, *out_GPU, *F_GPU;
if (cudaMalloc((void **) &A_GPU, A_size) != cudaSuccess) die("Error allocating GPU memory");
if (cudaMalloc((void **) &out_GPU, A_size) != cudaSuccess) die("Error allocating GPU memory");
if (cudaMalloc((void **) &F_GPU, F_size) != cudaSuccess) die("Error allocating GPU memory");
// Transfer the input vectors to GPU memory
cudaMemcpy(A_GPU, A_in, A_size, cudaMemcpyHostToDevice);
cudaMemcpy(F_GPU, F, F_size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize(); // this is only needed for timing purposes
transfer_time += stop_timer(memory_start_time, "\nGPU:\t Transfer to GPU");
// Determine the number of thread blocks in the x- and y-dimension
int num_blocks = (int) ((float) (N*N + threads_per_block - 1) / (float) threads_per_block);
int max_blocks_per_dimension = 65535;
int num_blocks_y = (int) ((float) (num_blocks + max_blocks_per_dimension - 1) / (float) max_blocks_per_dimension);
int num_blocks_x = (int) ((float) (num_blocks + num_blocks_y - 1) / (float) num_blocks_y);
dim3 grid_size(num_blocks_x, num_blocks_y, 1);
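// Example of the folding above: for N = 8192 with 256 threads per block,
// num_blocks = 8192*8192/256 = 262144, which exceeds the 65535 blocks allowed
// per grid dimension, so the launch is spread over a 2D grid of
// num_blocks_x by num_blocks_y blocks (slightly over-provisioned, which is
// why the kernel recomputes i and j from the flattened thread id).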
// Execute the kernel to compute the vector sum on the GPU
long long kernel_start_time;
kernel_start_time = start_timer();
vector_max_kernel <<< grid_size , threads_per_block >>> (A_GPU, out_GPU, N, F_GPU, K);
cudaDeviceSynchronize(); // this is only needed for timing purposes
kernel_time += stop_timer(kernel_start_time, "\t Kernel execution");
checkError();
// Transfer the result from the GPU to the CPU
memory_start_time = start_timer();
// copy the result matrix back
cudaMemcpy(A_out, out_GPU, A_size, cudaMemcpyDeviceToHost);
checkError();
cudaDeviceSynchronize(); // this is only needed for timing purposes
transfer_time += stop_timer(memory_start_time, "\tTransfer from GPU");
// Free the GPU memory
cudaFree(A_GPU);
cudaFree(out_GPU);
cudaFree(F_GPU);
// fill input pointers with ms runtimes
*kernel_runtime = usToSec(kernel_time);
*transfer_runtime = usToSec(transfer_time);
//return a single statistic
// return 0;
}
void CPU_convolve(float *A_in, float *A_out, int N, float *F, int K) {
int min_offset = -(K-1) / 2;
int max_offset = (K-1) / 2;
float *F_center = &F[ K * K / 2 ];
// If K = 5, F_center points at (row 2, col 2), so F_center[1] is (row 2, col 3);
// F_center[-K] is (row 1, col 2), F_center[K + 1] is (row 3, col 3), etc.
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
float result = 0.0;
for (int ii = min_offset; ii <= max_offset; ++ii) {
for (int jj = min_offset; jj <= max_offset; ++jj) {
result += A_in[clamp(i+ii, 0, N-1)*N + clamp(j+jj, 0, N-1)] *
F_center[ii * K + jj];
}
}
A_out[i * N + j ] = result;
}
}
}
// Returns a randomized vector containing N elements
// This version generates a vector containing values in the range [0,2)
float *get_random_vector(int N) {
if (N < 1) die("Number of elements must be greater than zero");
// Allocate memory for the vector
float *V;
cudaMallocHost((void **) &V, N * sizeof(float));
if (V == NULL) die("Error allocating CPU memory");
// Populate the vector with random numbers
for (int i = 0; i < N; i++) V[i] = rand() * 2.0f / RAND_MAX;
// Return the randomized vector
return V;
}
void printMatrix(float *X, int N, int M) {
for (int i = 0; i < N; ++i) {
printf("row %d: ", i);
for (int j = 0; j < M; ++j) {
printf("%f ", X[i * M + j]);
}
printf("\n");
}
}
void checkError() {
// Check for kernel errors
cudaError_t error = cudaGetLastError();
if (error) {
char message[256];
sprintf(message, "CUDA error: %s", cudaGetErrorString(error));
die(message);
}
}
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// converts a long long microsecond (us) value to float seconds
float usToSec(long long time) {
return ((float)time)/(1000000);
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, const char *name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
float elapsed = usToSec(end_time - start_time);
printf("%s: %.5f sec\n", name, elapsed);
return end_time - start_time;
}
// Prints the specified message and quits
void die(const char *message) {
printf("%s\n", message);
exit(1);
}
|
56a4b749390da6284cd4d7f7c00f12a07db192b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <functional>
#include <iostream>
#include <random>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "parallel_reduction.cuh"
template <typename T>
T cpu_reduction(const std::vector<T> & input, std::function<T(T,T)> op, T acc)
{
for (int i = 0; i < input.size(); ++i)
acc = op(acc, input[i]);
return acc;
}
int main(void)
{
std::random_device random_device_;
std::mt19937 generator_(random_device_());
std::uniform_real_distribution<float> distribution_(-1.0, 1.0);
const int kNumElements = 32768;
const int kNumBytes = kNumElements * sizeof(float);
std::cout << "Generating random vector in range [-1.0f, 1.0f] of " << kNumElements << " elements...\n";
std::vector<float> h_input_(kNumElements);
for (int i = 0; i < h_input_.size(); ++i)
h_input_[i] = distribution_(generator_);
// --- CPU ---------------------------------------------------------------- //
std::cout << "Executing sum reduction in CPU...\n";
std::function<float(float,float)> cpu_sum_operator_ = [] (float a, float b) -> float { return a+b; };
float result_ = cpu_reduction<float>(h_input_, cpu_sum_operator_, 0.0f);
std::cout << "Result is: " << result_ << "\n";
// --- GPU ---------------------------------------------------------------- //
std::cout << "Executing sum reduction in GPU...\n";
const int threads_per_block_ = 1024;
const int blocks_per_grid_ = kNumElements / threads_per_block_;
hipSetDevice(0);
float h_output_ = 0.0f;
float *d_input_;
float *d_intermediate_;
float *d_output_;
hipMalloc((void**)&d_input_, kNumBytes);
hipMalloc((void**)&d_intermediate_, kNumBytes); // Overallocated
hipMalloc((void**)&d_output_, sizeof(float));
hipMemcpy(d_input_, h_input_.data(), kNumBytes, hipMemcpyHostToDevice);
dim3 tpb_(threads_per_block_, 1, 1);
dim3 bpg_(blocks_per_grid_, 1, 1);
std::cout << "Threads Per Block: " << tpb_.x << "\n";
std::cout << "Blocks Per Grid: " << bpg_.x << "\n";
// Naive GPU implementation
hipLaunchKernelGGL(( gpu_reduction_naive), dim3(bpg_), dim3(tpb_), 0, 0, d_input_, d_intermediate_);
hipLaunchKernelGGL(( gpu_reduction_naive), dim3(1), dim3(bpg_), 0, 0, d_intermediate_, d_output_);
// Coalesced GPU implementation
//gpu_reduction_coalesced<<<bpg_, tpb_>>>(d_input_, d_intermediate_);
//gpu_reduction_coalesced<<<1, bpg_>>>(d_intermediate_, d_output_);
// Shared Memory GPU implementation
//gpu_reduction_shmem<<<bpg_, tpb_, tpb_.x * sizeof(float)>>>(d_input_, d_intermediate_);
//gpu_reduction_shmem<<<1, bpg_, bpg_.x * sizeof(float)>>>(d_intermediate_, d_output_);
hipMemcpy(&h_output_, d_output_, sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_input_);
hipFree(d_intermediate_);
hipFree(d_output_);
hipDeviceReset();
std::cout << "Result is: " << h_output_ << "\n";
}
| 56a4b749390da6284cd4d7f7c00f12a07db192b4.cu | #include <functional>
#include <iostream>
#include <random>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "parallel_reduction.cuh"
template <typename T>
T cpu_reduction(const std::vector<T> & input, std::function<T(T,T)> op, T acc)
{
for (int i = 0; i < input.size(); ++i)
acc = op(acc, input[i]);
return acc;
}
int main(void)
{
std::random_device random_device_;
std::mt19937 generator_(random_device_());
std::uniform_real_distribution<float> distribution_(-1.0, 1.0);
const int kNumElements = 32768;
const int kNumBytes = kNumElements * sizeof(float);
std::cout << "Generating random vector in range [-1.0f, 1.0f] of " << kNumElements << " elements...\n";
std::vector<float> h_input_(kNumElements);
for (int i = 0; i < h_input_.size(); ++i)
h_input_[i] = distribution_(generator_);
// --- CPU ---------------------------------------------------------------- //
std::cout << "Executing sum reduction in CPU...\n";
std::function<float(float,float)> cpu_sum_operator_ = [] (float a, float b) -> float { return a+b; };
float result_ = cpu_reduction<float>(h_input_, cpu_sum_operator_, 0.0f);
std::cout << "Result is: " << result_ << "\n";
// --- GPU ---------------------------------------------------------------- //
std::cout << "Executing sum reduction in GPU...\n";
const int threads_per_block_ = 1024;
const int blocks_per_grid_ = kNumElements / threads_per_block_;
cudaSetDevice(0);
float h_output_ = 0.0f;
float *d_input_;
float *d_intermediate_;
float *d_output_;
cudaMalloc((void**)&d_input_, kNumBytes);
cudaMalloc((void**)&d_intermediate_, kNumBytes); // Overallocated
cudaMalloc((void**)&d_output_, sizeof(float));
cudaMemcpy(d_input_, h_input_.data(), kNumBytes, cudaMemcpyHostToDevice);
dim3 tpb_(threads_per_block_, 1, 1);
dim3 bpg_(blocks_per_grid_, 1, 1);
std::cout << "Threads Per Block: " << tpb_.x << "\n";
std::cout << "Blocks Per Grid: " << bpg_.x << "\n";
// Naive GPU implementation
gpu_reduction_naive<<<bpg_, tpb_>>>(d_input_, d_intermediate_);
gpu_reduction_naive<<<1, bpg_>>>(d_intermediate_, d_output_);
// Coalesced GPU implementation
//gpu_reduction_coalesced<<<bpg_, tpb_>>>(d_input_, d_intermediate_);
//gpu_reduction_coalesced<<<1, bpg_>>>(d_intermediate_, d_output_);
// Shared Memory GPU implementation
//gpu_reduction_shmem<<<bpg_, tpb_, tpb_.x * sizeof(float)>>>(d_input_, d_intermediate_);
//gpu_reduction_shmem<<<1, bpg_, bpg_.x * sizeof(float)>>>(d_intermediate_, d_output_);
cudaMemcpy(&h_output_, d_output_, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_input_);
cudaFree(d_intermediate_);
cudaFree(d_output_);
cudaDeviceReset();
std::cout << "Result is: " << h_output_ << "\n";
}
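// --------------------------------------------------------------------------
// Illustrative sketch (not from parallel_reduction.cuh, which is not shown
// here): a minimal shared-memory tree reduction of the kind suggested by the
// commented-out gpu_reduction_shmem launches above. The kernel name is
// hypothetical; it assumes blockDim.x is a power of two and that the dynamic
// shared memory size passed at launch is blockDim.x * sizeof(float).
__global__ void gpu_reduction_shmem_sketch(const float *input, float *output)
{
    extern __shared__ float cache[];            // dynamically sized per-block buffer
    const unsigned int tid = threadIdx.x;
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    cache[tid] = input[idx];                    // stage one element per thread
    __syncthreads();

    // Halve the number of active threads each step, summing pairs in shared memory.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride)
            cache[tid] += cache[tid + stride];
        __syncthreads();
    }

    if (tid == 0)                               // thread 0 holds the block's partial sum
        output[blockIdx.x] = cache[0];
}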
|
1a48660c410a254d3b594c319ee3231649e3b840.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "cu_complex_operation.cuh"
#include <hip/hip_complex.h>
#include <hip/hip_runtime.h>
//#include<helper_cuda.h>
//#include<helper_functions.h>
#include<time.h>
#include<hip/hip_runtime_api.h>
#include"common.h"
// pivot row normalization
__global__ void normalizePivotRow( hipComplex *H, int index, int lda ) {
int tid=threadIdx.x;
__shared__ hipComplex pivotValue;
if(tid==0)
{
// printf("check the propagation matrix\n");
// for(int count1=0;count1<N1;count1++)
// {
// for(int count2=0;count2<2*N1;count2++)
// {
// printf("%0.4f%+0.4fi ", H[IDC2D(count1,count2,lda)].x, H[IDC2D(count1,count2,lda)].y);
// }
// printf("\n");
// }
}
if(tid<lda){
if ( tid == 0 ) // First thread of each block loads pivotValue
{
pivotValue = H[ IDC2D( index, index, lda) ];
}
__syncthreads();
// printf("the pivot value is %0.4f%+0.4fi :\n", pivotValue.x,pivotValue.y);
H[ IDC2D( index, tid, lda )]=complex_div(H[ IDC2D( index, tid, lda )],pivotValue);
__syncthreads();
}
//printf("the thread Id of matrix iverse is:\n");
//printf("%d ", tid);
//printf("the row of the pivot H is:\n");
//printf("%0.4f%+0.4fi ", H[ IDC2D( index, tid, lda )].x,H[ IDC2D( index, tid, lda )].y );
}
//elements update
__global__ void linearMge( hipComplex *matrix, int index, int lda,int BLOCKNUM) {
int tx=blockIdx.x*blockDim.x+threadIdx.x;
int bid =threadIdx.x;
int tid = threadIdx.y;
extern __shared__ hipComplex array[ ];
__shared__ hipComplex zero;
zero.x=0;
zero.y=0;
// extern __shared__ hipComplex matrixPivotValue[];
hipComplex *matrixPivotValue=array;
hipComplex *multColumn=array+blockDim.x;
// int(int((lda)/2)/BLOCKNUM);
hipComplex *matrixRow=array+blockDim.x+lda;
if(tx<int(lda/2))
{
if ( tx!=index ) {
if(tid==0)
{
// Each block loads the value of the pivot Row to be substracted
matrixPivotValue[bid] = matrix[ IDC2D( tx, index, lda )];
// resultPivotValue = result[ IDC2D( index, x, lda )];
matrix[ IDC2D(tx, index, lda )]=zero;
// printf("the zeroing tx is %d:\n", tx);
}
}
else
{
matrixPivotValue[bid]=zero;
}
__syncthreads();
if(tid==0)
{
// printf("\n");
// printf("the pivot column is:\n");
// printf("%0.4f%+0.4fi, %d ",matrix[ IDC2D(tx, index, lda )].x,matrix[ IDC2D(tx, index, lda )].y, tx);
}
if(bid==0)
{
multColumn[ tid ] = matrix[ IDC2D( index, tid, lda )];
}
matrixRow[ IDC2D(bid,tid,lda) ] = matrix[ IDC2D( tx, tid, lda )];
// resultRow[ ty ] = result[ IDC2D( y, x, lda )];
__syncthreads();
// newMatrixValue =matrix[ IDC2D( ty, x, lda )];
if(tid!=index)
{
matrix[ IDC2D(tx, tid, lda) ]=complex_sub(matrixRow[IDC2D(bid,tid,lda)],complex_mulcom( multColumn[tid],matrixPivotValue[bid]));
}
// Copy to the matrix
// matrix[ IDC2D( ty, x, lda) ] = newMatrixValue;
__syncthreads();
// printf("the update value is:\n");
// printf("%0.4f%+0.4fi ",matrix[ IDC2D( index, tid, lda )].x,matrix[ IDC2D( index, tid, lda )].y );
// printf("the index of the whole matrix is %d:\n", tx);
// printf("the index of the matrix in one block is: %d\n", bid);
}
}
__global__ void transfer(
hipComplex *d_H,
hipComplex *R_inv,
int size
)
{
int bid=blockIdx.x*blockDim.x+threadIdx.x;
int tid=threadIdx.y;
if(bid>=0&&bid<size&&tid>=0&&tid<size)
{
R_inv[IDC2D(bid,tid,size)]=d_H[IDC2D(bid,(tid+size),2*size)];
}
__syncthreads();
// if(bid==0&&tid==0)
// {
// printf("the result of transfer is:\n");
// for(int count1=0;count1<size;count1++)
// {//#define BLOCKNUM 4
//Row switching
// for(int count2=0;count2<size;count2++)
// {
// printf("%0.4f%+0.4fi ", R_inv[IDC2D(count1,count2,size)].x,R_inv[IDC2D(count1,count2,size)].y);
// }
// printf("\n");
// }
// }
}
__global__ void initial
(
hipComplex *d_H,
hipComplex *matrix,
int size
)
{
int bid=blockIdx.x*blockDim.x+threadIdx.x;
int tid=threadIdx.y;
if(bid<size)
{
if(tid<size)
{
matrix[IDC2D(bid,tid,2*size)]=d_H[IDC2D(bid,tid,size)];
}
else if(tid==bid+size)
{
matrix[IDC2D(bid,tid,2*size)].x=1;
matrix[IDC2D(bid,tid,2*size)].y=0;
}
else
{
matrix[IDC2D(bid,tid,2*size)].x=0;
matrix[IDC2D(bid,tid,2*size)].y=0;
}
}
__syncthreads();
// if(bid==0&&tid==0)
// {
// printf("the result of initial is:\n");
// for(int count1=0;count1<size;count1++)
// {
// for(int count2=0;count2<2*size;count2++)
// {
// printf("%0.4f%+0.4fi ", matrix[IDC2D(count1,count2,2*size)].x,matrix[IDC2D(count1,count2,2*size)].y);
// }
// printf("\n");
// }
// }
}
void MATRIX_INVERSE(
hipComplex *H, //input square matrix stored in row
hipComplex *R, //the inversion of the matrix H stored in row
int row, // the number of the rows
int column //the number of columns of the H_row
)
{
int BLOCKNUM=16;
hipError_t error;
int count1,count2;
if(column<=8)
{
BLOCKNUM=1;
}
dim3 thread1(ceil(float(column)/float(BLOCKNUM)),2*column);
dim3 thread2(ceil(float(column)/float(BLOCKNUM)),column);
hipComplex *d_matrix;
clock_t start, end;
double duration;
hipMalloc((void**) &d_matrix, column*2*column*sizeof(hipComplex));
start=clock();
hipLaunchKernelGGL(( initial), dim3(BLOCKNUM),dim3(thread1), 0, 0, H,d_matrix,column);
end=clock();
duration=double(end-start);
error=hipDeviceSynchronize();
if(error!=hipSuccess)
{
printf("error=%s\n",hipGetErrorString(hipGetLastError()));
}
dim3 blockDim(ceil(float(column)/float(BLOCKNUM)),2*column);
start=clock();
for(count1=0; count1<column; count1++)
{
//shared memory to be changed
hipLaunchKernelGGL(( normalizePivotRow), dim3(1),dim3(2*column), 0, 0, d_matrix, count1, 2*column );
// error=hipDeviceSynchronize();
// if(error!=hipSuccess)
// {
printf("error=%s\n",hipGetErrorString(hipGetLastError()));
// }
hipLaunchKernelGGL(( linearMge), dim3(BLOCKNUM),dim3(blockDim),5000*sizeof(hipComplex), 0, d_matrix, count1, 2*column,BLOCKNUM );
error=hipDeviceSynchronize();
if(error!=hipSuccess)
// {
printf("error=%s\n",hipGetErrorString(hipGetLastError()));
// }
}
end=clock();
duration=double(end-start);
start=clock();
hipLaunchKernelGGL(( transfer), dim3(BLOCKNUM),dim3(thread2), 0, 0, d_matrix,R,column);
end=clock();
duration=double(end-start);
error=hipDeviceSynchronize();
// if(error!=hipSuccess)
// {
printf("error=%s\n",hipGetErrorString(hipGetLastError()));
// }
hipFree(d_matrix);
// free(R);
// free(H);
}
| 1a48660c410a254d3b594c319ee3231649e3b840.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#include "cu_complex_operation.cuh"
#include <cuComplex.h>
#include <cuda.h>
//#include<helper_cuda.h>
//#include<helper_functions.h>
#include<time.h>
#include<cuda_profiler_api.h>
#include"common.h"
// pivot row normalization
__global__ void normalizePivotRow( cuComplex *H, int index, int lda ) {
int tid=threadIdx.x;
__shared__ cuComplex pivotValue;
if(tid==0)
{
// printf("check the propagation matrix\n");
// for(int count1=0;count1<N1;count1++)
// {
// for(int count2=0;count2<2*N1;count2++)
// {
// printf("%0.4f%+0.4fi ", H[IDC2D(count1,count2,lda)].x, H[IDC2D(count1,count2,lda)].y);
// }
// printf("\n");
// }
}
if(tid<lda){
if ( tid == 0 ) // First thread of each block loads pivotValue
{
pivotValue = H[ IDC2D( index, index, lda) ];
}
__syncthreads();
// printf("the pivot value is %0.4f%+0.4fi :\n", pivotValue.x,pivotValue.y);
H[ IDC2D( index, tid, lda )]=complex_div(H[ IDC2D( index, tid, lda )],pivotValue);
__syncthreads();
}
//printf("the thread Id of matrix iverse is:\n");
//printf("%d ", tid);
//printf("the row of the pivot H is:\n");
//printf("%0.4f%+0.4fi ", H[ IDC2D( index, tid, lda )].x,H[ IDC2D( index, tid, lda )].y );
}
//elements update
__global__ void linearMge( cuComplex *matrix, int index, int lda,int BLOCKNUM) {
int tx=blockIdx.x*blockDim.x+threadIdx.x;
int bid =threadIdx.x;
int tid = threadIdx.y;
extern __shared__ cuComplex array[ ];
__shared__ cuComplex zero;
zero.x=0;
zero.y=0;
// extern __shared__ cuComplex matrixPivotValue[];
cuComplex *matrixPivotValue=array;
cuComplex *multColumn=array+blockDim.x;
// int(int((lda)/2)/BLOCKNUM);
cuComplex *matrixRow=array+blockDim.x+lda;
if(tx<int(lda/2))
{
if ( tx!=index ) {
if(tid==0)
{
// Each block loads the value of the pivot Row to be substracted
matrixPivotValue[bid] = matrix[ IDC2D( tx, index, lda )];
// resultPivotValue = result[ IDC2D( index, x, lda )];
matrix[ IDC2D(tx, index, lda )]=zero;
// printf("the zeroing tx is %d:\n", tx);
}
}
else
{
matrixPivotValue[bid]=zero;
}
__syncthreads();
if(tid==0)
{
// printf("\n");
// printf("the pivot column is:\n");
// printf("%0.4f%+0.4fi, %d ",matrix[ IDC2D(tx, index, lda )].x,matrix[ IDC2D(tx, index, lda )].y, tx);
}
if(bid==0)
{
multColumn[ tid ] = matrix[ IDC2D( index, tid, lda )];
}
matrixRow[ IDC2D(bid,tid,lda) ] = matrix[ IDC2D( tx, tid, lda )];
// resultRow[ ty ] = result[ IDC2D( y, x, lda )];
__syncthreads();
// newMatrixValue =matrix[ IDC2D( ty, x, lda )];
if(tid!=index)
{
matrix[ IDC2D(tx, tid, lda) ]=complex_sub(matrixRow[IDC2D(bid,tid,lda)],complex_mulcom( multColumn[tid],matrixPivotValue[bid]));
}
// Copy to the matrix
// matrix[ IDC2D( ty, x, lda) ] = newMatrixValue;
__syncthreads();
// printf("the update value is:\n");
// printf("%0.4f%+0.4fi ",matrix[ IDC2D( index, tid, lda )].x,matrix[ IDC2D( index, tid, lda )].y );
// printf("the index of the whole matrix is %d:\n", tx);
// printf("the index of the matrix in one block is: %d\n", bid);
}
}
__global__ void transfer(
cuComplex *d_H,
cuComplex *R_inv,
int size
)
{
int bid=blockIdx.x*blockDim.x+threadIdx.x;
int tid=threadIdx.y;
if(bid>=0&&bid<size&&tid>=0&&tid<size)
{
R_inv[IDC2D(bid,tid,size)]=d_H[IDC2D(bid,(tid+size),2*size)];
}
__syncthreads();
// if(bid==0&&tid==0)
// {
// printf("the result of transfer is:\n");
// for(int count1=0;count1<size;count1++)
// {//#define BLOCKNUM 4
//Row switching
// for(int count2=0;count2<size;count2++)
// {
// printf("%0.4f%+0.4fi ", R_inv[IDC2D(count1,count2,size)].x,R_inv[IDC2D(count1,count2,size)].y);
// }
// printf("\n");
// }
// }
}
__global__ void initial
(
cuComplex *d_H,
cuComplex *matrix,
int size
)
{
int bid=blockIdx.x*blockDim.x+threadIdx.x;
int tid=threadIdx.y;
if(bid<size)
{
if(tid<size)
{
matrix[IDC2D(bid,tid,2*size)]=d_H[IDC2D(bid,tid,size)];
}
else if(tid==bid+size)
{
matrix[IDC2D(bid,tid,2*size)].x=1;
matrix[IDC2D(bid,tid,2*size)].y=0;
}
else
{
matrix[IDC2D(bid,tid,2*size)].x=0;
matrix[IDC2D(bid,tid,2*size)].y=0;
}
}
__syncthreads();
// if(bid==0&&tid==0)
// {
// printf("the result of initial is:\n");
// for(int count1=0;count1<size;count1++)
// {
// for(int count2=0;count2<2*size;count2++)
// {
// printf("%0.4f%+0.4fi ", matrix[IDC2D(count1,count2,2*size)].x,matrix[IDC2D(count1,count2,2*size)].y);
// }
// printf("\n");
// }
// }
}
void MATRIX_INVERSE(
cuComplex *H, //input square matrix stored in row
cuComplex *R, //the inversion of the matrix H stored in row
int row, // the number of the rows
int column //the number of columns of the H_row
)
{
int BLOCKNUM=16;
cudaError_t error;
int count1,count2;
if(column<=8)
{
BLOCKNUM=1;
}
dim3 thread1(ceil(float(column)/float(BLOCKNUM)),2*column);
dim3 thread2(ceil(float(column)/float(BLOCKNUM)),column);
cuComplex *d_matrix;
clock_t start, end;
double duration;
cudaMalloc((void**) &d_matrix, column*2*column*sizeof(cuComplex));
start=clock();
initial<<<BLOCKNUM,thread1>>>(H,d_matrix,column);
end=clock();
duration=double(end-start);
error=cudaDeviceSynchronize();
if(error!=cudaSuccess)
{
printf("error=%s\n",cudaGetErrorString(cudaGetLastError()));
}
dim3 blockDim(ceil(float(column)/float(BLOCKNUM)),2*column);
start=clock();
for(count1=0; count1<column; count1++)
{
//shared memory to be changed
normalizePivotRow<<<1,2*column>>>( d_matrix, count1, 2*column );
// error=cudaDeviceSynchronize();
// if(error!=cudaSuccess)
// {
printf("error=%s\n",cudaGetErrorString(cudaGetLastError()));
// }
linearMge<<<BLOCKNUM,blockDim,5000*sizeof(cuComplex)>>>( d_matrix, count1, 2*column,BLOCKNUM );
error=cudaDeviceSynchronize();
if(error!=cudaSuccess)
// {
printf("error=%s\n",cudaGetErrorString(cudaGetLastError()));
// }
}
end=clock();
duration=double(end-start);
start=clock();
transfer<<<BLOCKNUM,thread2>>>(d_matrix,R,column);
end=clock();
duration=double(end-start);
error=cudaDeviceSynchronize();
// if(error!=cudaSuccess)
// {
printf("error=%s\n",cudaGetErrorString(cudaGetLastError()));
// }
cudaFree(d_matrix);
// free(R);
// free(H);
}
|
55742b23afb0470a684867d882ae2e4f48a94a5b.hip | // !!! This is a file automatically generated by hipify!!!
/*The parallel CUDA code for 2D Ising Model simulation using Metropolis Monte Carlo algorithm
In this implementation, the random numbers are generated on the GPU side.
Once the CUDA environment is installed, the code can be compiled directly in a Linux terminal:
nvcc ising2.cu -o ising2
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
// the 2D block size
#define BDIMX 8
#define BDIMY 1
// Monte Carlo sweeps: N Monte Carlo steps - one for each spin, on average
#define sweeps1 6000
#define sweeps2 3000
// function create initial spins on a lattice
void InitialSpins(int *spins, int N, float msg)
{
int i;
float R;
for (i = 0; i < N; i++)
{
R = rand() / (float)(RAND_MAX);
if (R < msg)
{
spins[i] = 1;
}
else
{
spins[i] = -1;
}
}
}
// linSpace Temperature
void linSpaceTemperature(float start, float end, int n, float *Temperature)
{
int i;
float h = (end - start) / (n - 1);
for (i = 0; i < n; i++)
{
Temperature[i] = start + i * h;
}
}
// set the random number generator
// initialize state
__global__ void InitializeState(hiprandState_t *states)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int idx = iy * nx + ix;
hiprand_init(1234, idx, 0, &states[idx]);
}
// GPU random number generator
__global__ void RandGenerator(hiprandState_t *states, float *rand)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int idx = iy * nx + ix;
hiprandState_t localstate = states[idx];
rand[idx] = hiprand_uniform(&localstate);
states[idx] = localstate;
}
/* declare global variable on GPU */
// variables for temporarily storing the properties of one step
__device__ int d_m;
__device__ int d_e;
// variables for summing over all the properties of every step
__device__ int d_M;
__device__ int d_E;
// variables for specific heat and magnetic susceptibility
__device__ float d_M2;
__device__ float d_E2;
// calculate the properties
__global__ void CalcProp(int *energy, int *spins, int size)
{
// map the threads to the global memory
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int idx = iy * nx + ix;
// calculate the properties of the present configuration
atomicAdd(&d_m, spins[idx]);
atomicAdd(&d_e, energy[idx]);
if (idx == 0)
{
d_M += abs(d_m);
d_E += d_e;
d_E2 += (((float)d_e)*d_e)/ (2.0f * 2.0f);
d_M2 += (((float)d_m)*d_m);
d_m = 0;
d_e = 0;
}
}
// reset the variables after every temperature iteration
__global__ void reset()
{
// map the threads to the global memory
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int idx = iy * nx + ix;
if (idx == 0)
{
d_M = 0;
d_E = 0;
d_M2 = 0.;
d_E2 = 0.;
}
}
// flip spins using Metropolis algorithm
__global__ void MetropolisDevice_even(int *spins, int *energy, float *random, const float Beta)
{
// map the threads to the global memory
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int idx = iy * nx + ix;
float rand = random[idx];
int dE;
int left, right, up, down;
// place the value to neighbours with boundary conditions
if (ix == 0)
{
left = spins[idx + nx - 1];
}
else
{
left = spins[idx - 1];
}
if (ix == (ny - 1))
{
right = spins[idx - nx + 1];
}
else
{
right = spins[idx + 1];
}
if (iy == 0)
{
up = spins[idx + (ny - 1) * nx];
}
else
{
up = spins[idx - nx];
}
if (iy == nx - 1)
{
down = spins[idx - (ny - 1) * nx];
}
else
{
down = spins[idx + nx];
}
if ((ix + iy) % 2 == 0) //flip even spins
{
energy[idx] = -spins[idx] * (left + right + up + down);
dE = -2 * energy[idx];
if (dE < 0 || exp(-dE * Beta) > rand)
{
spins[idx] *= -1;
energy[idx] *= -1;
}
}
}
__global__ void MetropolisDevice_odd(int *spins, int *energy, float *random, const float Beta)
{
// map the threads to the global memory
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int idx = iy * nx + ix;
float rand = random[idx];
float dE;
int left, right, up, down;
// place the value to neighbours with boundary conditions
if (ix == 0)
{
left = spins[idx + nx - 1];
}
else
{
left = spins[idx - 1];
}
if (ix == ny - 1)
{
right = spins[idx - nx + 1];
}
else
{
right = spins[idx + 1];
}
if (iy == 0)
{
up = spins[idx + (ny - 1) * nx];
}
else
{
up = spins[idx - nx];
}
if (iy == nx - 1)
{
down = spins[idx - (ny - 1) * nx];
}
else
{
down = spins[idx + nx];
}
if ((ix + iy) % 2 != 0) //flip odd spins
{
energy[idx] = -spins[idx] * (left + right + up + down);
dE = -2 * (float)energy[idx];
if (dE < 0 || exp(-dE * Beta) > rand)
{
spins[idx] *= -1;
energy[idx] *= -1;
}
}
}
int main()
{
//latice size
int size = 8;
printf("CUDA program\n");
printf("\n%d x %d size latice \n", size, size);
printf("The random numbers are generated on GPU side\n");
int i, n; // iteration variables
float Beta; // beta = J/KT, in this project set k = 1, J = 1.
// temperature interval
int numberTemperature = 45; // number of temperatures sampled
float *Temperature = (float*)malloc(numberTemperature * sizeof(float));
linSpaceTemperature(0.5, 5.0, numberTemperature, Temperature);
printf("\nTemperature range 0.5 to 5.0\n");
// msg sets the initial configuration: msg = 0.5 gives a random spin configuration, msg = 0 gives a fully oriented one.
float msg = 0.5;
// average energy and magnetization per spin
float *avergEnergy = (float*)malloc(numberTemperature * sizeof(float));
float *avergMag = (float*)malloc(numberTemperature * sizeof(float));
// variables for calculating specific heat and magnetic susceptibility
float *avergEnergy2 = (float*)malloc(numberTemperature * sizeof(float));
float *avergMag2 = (float*)malloc(numberTemperature * sizeof(float));
// specific heat and magnetic susceptibility
float *heat = (float*)malloc(numberTemperature * sizeof(float));
float *sus = (float*)malloc(numberTemperature * sizeof(float));
// declare variables and allocate memory
int *d_spins;
int *h_spins;
int *d_energy;
int *h_energy;
int *gpuRef; // results return from GPU
float *h_random_numbers;
float *d_random_numbers;
int nxy = size * size;
int nBytes = nxy * sizeof(int);
int NBytes = nxy * sizeof(float);
h_spins = (int *)malloc(nBytes);
h_energy = (int *)malloc(nBytes);
gpuRef = (int *)malloc(nBytes);
h_random_numbers = (float *)malloc(NBytes);
//set random number generator seed
srand(123456);
// initialize data at host side
memset(gpuRef, 0, nBytes);
memset(h_energy, 0, nBytes);
InitialSpins(h_spins, nxy, msg);
// malloc device global memory
hipMalloc((void **)&d_spins, nBytes);
hipMalloc((void **)&d_energy, nBytes);
hipMalloc((void **)&d_random_numbers, NBytes);
// transfer data from host to device
int h_m = 0;
int h_e = 0;
int h_M = 0;
int h_E = 0;
float h_M2 = 0.0f;
float h_E2 = 0.0f;
hipMemcpy(d_spins, h_spins, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_energy, h_energy, nBytes, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_M, &h_M, sizeof(int));
hipMemcpyToSymbol(d_E, &h_E, sizeof(int));
hipMemcpyToSymbol(d_m, &h_m, sizeof(int));
hipMemcpyToSymbol(d_e, &h_e, sizeof(int));
hipMemcpyToSymbol(d_M2, &h_M2, sizeof(float));
hipMemcpyToSymbol(d_E2, &h_E2, sizeof(float));
// invoke kernel at host side
dim3 block(BDIMX, BDIMY);
dim3 grid(size / BDIMX, size / BDIMY);
// rand_device
static hiprandState_t *states = NULL;
hipMalloc((void **)&states, sizeof(hiprandState_t) * nxy);
InitializeState << <grid, block >> > (states);
// timing
clock_t d_start, d_end;
double d_time_used;
d_start = clock();
printf("\nMain loop starting...\n");
// main loop (loop over the temperature)
for (n = 0; n < numberTemperature; n++)
{
Beta = 1 / Temperature[n];
// process for equilibrium
for (i = 0; i < sweeps1; i++)
{
RandGenerator << <grid, block >> > (states, d_random_numbers);
MetropolisDevice_even << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta);
MetropolisDevice_odd << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta);
}
// process for calculating the properties
for (i = 0; i < sweeps2; i++)
{
RandGenerator << <grid, block >> > (states, d_random_numbers);
MetropolisDevice_even << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta);
MetropolisDevice_odd << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta);
//printf("Temperature %.3f Iteration %d\n", Temperature[n], i + 1);
hipLaunchKernelGGL(( CalcProp) , dim3(grid), dim3(block) , 0, 0, d_energy, d_spins,size);
//hipDeviceSynchronize();
}
hipMemcpyFromSymbol(&h_M, d_M, sizeof(int));
hipMemcpyFromSymbol(&h_E, d_E, sizeof(int));
hipMemcpyFromSymbol(&h_M2, d_M2, sizeof(float));
hipMemcpyFromSymbol(&h_E2, d_E2, sizeof(float));
// calculate the average properties per spin
avergEnergy[n] = h_E / ((sweeps2 )*((float)(size*size))*2.0f);
avergMag[n] = h_M / ((sweeps2 )*((float)(size*size)));
avergEnergy2[n] = h_E2 / ((sweeps2 ));
avergMag2[n] = h_M2 / ((sweeps2));
heat[n] = (avergEnergy2[n]/ ((float)(size*size)) - avergEnergy[n] * avergEnergy[n]*((size*size)))*Beta*Beta;
sus[n] = (avergMag2[n]/ ((float)(size*size)) - avergMag[n] * avergMag[n]*(size*size))*Beta;
reset << <grid, block >> > ();
}
d_end = clock();
hipMemcpy(gpuRef, d_spins, nBytes, hipMemcpyDeviceToHost);
d_time_used = ((double)(d_end - d_start)) / CLOCKS_PER_SEC;
printf("\nEnd main loop.\nTotal time using GPU %f s\n", d_time_used);
// deallocate the GPU memory
hipFree(d_random_numbers);
hipFree(d_spins);
hipFree(d_energy);
hipDeviceReset();
FILE *properties;
properties = fopen("Properties_CUDA2.txt", "a");
fprintf(properties, "%d x %d size lattice\n", size, size);
fprintf(properties, "\nTemperature Energy Magnetization Specific heat Magnetic susceptibility (per spin)\n");
for (i = 0; i < numberTemperature; i++)
{
fprintf(properties, "%.2f %.3f %.3f %.3f %.3f \n", Temperature[i], avergEnergy[i], \
avergMag[i], heat[i], sus[i]);
}
fclose(properties);
// print out the properties
printf("\nTemperature Energy Magnetization Specific heat Magnetic susceptibility (per spin)\n");
for (i = 0; i < numberTemperature; i++)
{
printf("%.2f %.3f %.3f %.3f %.3f \n", \
Temperature[i], avergEnergy[i], \
avergMag[i], heat[i], sus[i]);
}
// deallocate the memory
free(h_spins);
free(h_random_numbers);
free(Temperature);
free(h_energy);
printf("end\n");
return 0;
}
| 55742b23afb0470a684867d882ae2e4f48a94a5b.cu | /*The parallel CUDA code for 2D Ising Model simulation using Metropolis Monte Carlo algorithm
In this implementation, the random numbers are generated on the GPU side.
Once the CUDA environment is installed, the code can be compiled directly in a Linux terminal:
nvcc ising2.cu -o ising2
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
// the 2D block size
#define BDIMX 8
#define BDIMY 1
// Monte Carlo sweeps: N Monte Carlo steps - one for each spin, on average
#define sweeps1 6000
#define sweeps2 3000
// function create initial spins on a lattice
void InitialSpins(int *spins, int N, float msg)
{
int i;
float R;
for (i = 0; i < N; i++)
{
R = rand() / (float)(RAND_MAX);
if (R < msg)
{
spins[i] = 1;
}
else
{
spins[i] = -1;
}
}
}
// linSpace Temperature
void linSpaceTemperature(float start, float end, int n, float *Temperature)
{
int i;
float h = (end - start) / (n - 1);
for (i = 0; i < n; i++)
{
Temperature[i] = start + i * h;
}
}
// set the random number generator
// initialize state
__global__ void InitializeState(curandState *states)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int idx = iy * nx + ix;
curand_init(1234, idx, 0, &states[idx]);
}
// GPU random number generator
__global__ void RandGenerator(curandState *states, float *rand)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int idx = iy * nx + ix;
curandState localstate = states[idx];
rand[idx] = curand_uniform(&localstate);
states[idx] = localstate;
}
/* declare global variable on GPU */
// variables for temporarily storing the properties of one step
__device__ int d_m;
__device__ int d_e;
// variables for summing over all the properties of every step
__device__ int d_M;
__device__ int d_E;
// variables for specific heat and magnetic susceptibility
__device__ float d_M2;
__device__ float d_E2;
// calculate the properties
__global__ void CalcProp(int *energy, int *spins, int size)
{
// map the threads to the global memory
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int idx = iy * nx + ix;
// calculate the properties of the present configuration
atomicAdd(&d_m, spins[idx]);
atomicAdd(&d_e, energy[idx]);
if (idx == 0)
{
d_M += abs(d_m);
d_E += d_e;
d_E2 += (((float)d_e)*d_e)/ (2.0f * 2.0f);
d_M2 += (((float)d_m)*d_m);
d_m = 0;
d_e = 0;
}
}
// reset the variables after every temperature iteration
__global__ void reset()
{
// map the threads to the global memory
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int idx = iy * nx + ix;
if (idx == 0)
{
d_M = 0;
d_E = 0;
d_M2 = 0.;
d_E2 = 0.;
}
}
// flip spins using Metropolis algorithm
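// The even/odd pair of kernels below implements a checkerboard update:
// each spin interacts only with its four nearest neighbours, and those
// neighbours always have the opposite parity of (ix + iy). Updating all
// even sites in one launch and all odd sites in the next therefore lets
// every thread flip its spin without racing against the neighbours it reads.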
__global__ void MetropolisDevice_even(int *spins, int *energy, float *random, const float Beta)
{
// map the threads to the global memory
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int idx = iy * nx + ix;
float rand = random[idx];
int dE;
int left, right, up, down;
// place the value to neighbours with boundary conditions
if (ix == 0)
{
left = spins[idx + nx - 1];
}
else
{
left = spins[idx - 1];
}
if (ix == (ny - 1))
{
right = spins[idx - nx + 1];
}
else
{
right = spins[idx + 1];
}
if (iy == 0)
{
up = spins[idx + (ny - 1) * nx];
}
else
{
up = spins[idx - nx];
}
if (iy == nx - 1)
{
down = spins[idx - (ny - 1) * nx];
}
else
{
down = spins[idx + nx];
}
if ((ix + iy) % 2 == 0) //flip even spins
{
energy[idx] = -spins[idx] * (left + right + up + down);
dE = -2 * energy[idx];
if (dE < 0 || exp(-dE * Beta) > rand)
{
spins[idx] *= -1;
energy[idx] *= -1;
}
}
}
__global__ void MetropolisDevice_odd(int *spins, int *energy, float *random, const float Beta)
{
// map the threads to the global memory
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
int idx = iy * nx + ix;
float rand = random[idx];
float dE;
int left, right, up, down;
// place the value to neighbours with boundary conditions
if (ix == 0)
{
left = spins[idx + nx - 1];
}
else
{
left = spins[idx - 1];
}
if (ix == ny - 1)
{
right = spins[idx - nx + 1];
}
else
{
right = spins[idx + 1];
}
if (iy == 0)
{
up = spins[idx + (ny - 1) * nx];
}
else
{
up = spins[idx - nx];
}
if (iy == nx - 1)
{
down = spins[idx - (ny - 1) * nx];
}
else
{
down = spins[idx + nx];
}
if ((ix + iy) % 2 != 0) //flip odd spins
{
energy[idx] = -spins[idx] * (left + right + up + down);
dE = -2 * (float)energy[idx];
if (dE < 0 || exp(-dE * Beta) > rand)
{
spins[idx] *= -1;
energy[idx] *= -1;
}
}
}
int main()
{
//latice size
int size = 8;
printf("CUDA program\n");
printf("\n%d x %d size latice \n", size, size);
printf("The random numbers are generated on GPU side\n");
int i, n; // iteration variables
float Beta; // beta = J/KT, in this project set k = 1, J = 1.
// temperature interval
int numberTemperature = 45; // number of temperatures sampled
float *Temperature = (float*)malloc(numberTemperature * sizeof(float));
linSpaceTemperature(0.5, 5.0, numberTemperature, Temperature);
printf("\nTemperature range 0.5 to 5.0\n");
// msg sets the initial configuration: msg = 0.5 gives a random spin configuration, msg = 0 gives a fully oriented one.
float msg = 0.5;
// average energy and magnetization per spin
float *avergEnergy = (float*)malloc(numberTemperature * sizeof(float));
float *avergMag = (float*)malloc(numberTemperature * sizeof(float));
// variables for calculating specific heat and magnetic susceptibility
float *avergEnergy2 = (float*)malloc(numberTemperature * sizeof(float));
float *avergMag2 = (float*)malloc(numberTemperature * sizeof(float));
// specific heat and magnetic susceptibility
float *heat = (float*)malloc(numberTemperature * sizeof(float));
float *sus = (float*)malloc(numberTemperature * sizeof(float));
// declare variables and allocate memory
int *d_spins;
int *h_spins;
int *d_energy;
int *h_energy;
int *gpuRef; // results return from GPU
float *h_random_numbers;
float *d_random_numbers;
int nxy = size * size;
int nBytes = nxy * sizeof(int);
int NBytes = nxy * sizeof(float);
h_spins = (int *)malloc(nBytes);
h_energy = (int *)malloc(nBytes);
gpuRef = (int *)malloc(nBytes);
h_random_numbers = (float *)malloc(NBytes);
//set random number generator seed
srand(123456);
// initialize data at host side
memset(gpuRef, 0, nBytes);
memset(h_energy, 0, nBytes);
InitialSpins(h_spins, nxy, msg);
// malloc device global memory
cudaMalloc((void **)&d_spins, nBytes);
cudaMalloc((void **)&d_energy, nBytes);
cudaMalloc((void **)&d_random_numbers, NBytes);
// transfer data from host to device
int h_m = 0;
int h_e = 0;
int h_M = 0;
int h_E = 0;
float h_M2 = 0.0f;
float h_E2 = 0.0f;
cudaMemcpy(d_spins, h_spins, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_energy, h_energy, nBytes, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_M, &h_M, sizeof(int));
cudaMemcpyToSymbol(d_E, &h_E, sizeof(int));
cudaMemcpyToSymbol(d_m, &h_m, sizeof(int));
cudaMemcpyToSymbol(d_e, &h_e, sizeof(int));
cudaMemcpyToSymbol(d_M2, &h_M2, sizeof(float));
cudaMemcpyToSymbol(d_E2, &h_E2, sizeof(float));
// invoke kernel at host side
dim3 block(BDIMX, BDIMY);
dim3 grid(size / BDIMX, size / BDIMY);
// rand_device
static curandState *states = NULL;
cudaMalloc((void **)&states, sizeof(curandState) * nxy);
InitializeState << <grid, block >> > (states);
// timing
clock_t d_start, d_end;
double d_time_used;
d_start = clock();
printf("\nMain loop starting...\n");
// main loop (loop over the temperature)
for (n = 0; n < numberTemperature; n++)
{
Beta = 1 / Temperature[n];
// process for equilibrium
for (i = 0; i < sweeps1; i++)
{
RandGenerator << <grid, block >> > (states, d_random_numbers);
MetropolisDevice_even << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta);
MetropolisDevice_odd << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta);
}
// process for calculating the properties
for (i = 0; i < sweeps2; i++)
{
RandGenerator << <grid, block >> > (states, d_random_numbers);
MetropolisDevice_even << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta);
MetropolisDevice_odd << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta);
//printf("Temperature %.3f Iteration %d\n", Temperature[n], i + 1);
CalcProp <<<grid, block >>> (d_energy, d_spins,size);
//cudaDeviceSynchronize();
}
cudaMemcpyFromSymbol(&h_M, d_M, sizeof(int));
cudaMemcpyFromSymbol(&h_E, d_E, sizeof(int));
cudaMemcpyFromSymbol(&h_M2, d_M2, sizeof(float));
cudaMemcpyFromSymbol(&h_E2, d_E2, sizeof(float));
// calculate the average propeties per spin
avergEnergy[n] = h_E / ((sweeps2 )*((float)(size*size))*2.0f);
avergMag[n] = h_M / ((sweeps2 )*((float)(size*size)));
avergEnergy2[n] = h_E2 / ((sweeps2 ));
avergMag2[n] = h_M2 / ((sweeps2));
heat[n] = (avergEnergy2[n]/ ((float)(size*size)) - avergEnergy[n] * avergEnergy[n]*((size*size)))*Beta*Beta;
sus[n] = (avergMag2[n]/ ((float)(size*size)) - avergMag[n] * avergMag[n]*(size*size))*Beta;
reset << <grid, block >> > ();
}
d_end = clock();
cudaMemcpy(gpuRef, d_spins, nBytes, cudaMemcpyDeviceToHost);
d_time_used = ((double)(d_end - d_start)) / CLOCKS_PER_SEC;
printf("\nEnd main loop.\nTotal time using GPU %f s\n", d_time_used);
// deallocate the GPU memory
cudaFree(d_random_numbers);
cudaFree(d_spins);
cudaFree(d_energy);
cudaDeviceReset();
FILE *properties;
properties = fopen("Properties_CUDA2.txt", "a");
fprintf(properties, "%d x %d size lattice\n", size, size);
fprintf(properties, "\nTemperature Energy Magnetization Specific heat Magnetic susceptibility (per spin)\n");
for (i = 0; i < numberTemperature; i++)
{
fprintf(properties, "%.2f %.3f %.3f %.3f %.3f \n", Temperature[i], avergEnergy[i], \
avergMag[i], heat[i], sus[i]);
}
fclose(properties);
// print out the properties
printf("\nTemperature Energy Magnetization Specific heat Magnetic susceptibility (per spin)\n");
for (i = 0; i < numberTemperature; i++)
{
printf("%.2f %.3f %.3f %.3f %.3f \n", \
Temperature[i], avergEnergy[i], \
avergMag[i], heat[i], sus[i]);
}
// deallocate the memory
free(h_spins);
free(h_random_numbers);
free(Temperature);
free(h_energy);
printf("end\n");
return 0;
}
|
efc23064506967378280387f8df60cc981043263.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h> // for rand
#include <stdio.h> // for printf
#include <hip/hip_runtime.h> // for CUDA
#include <time.h>
#define P_WIDTH 60
#define P_HEIGHT 30
#define HEIGHT 1024
#define WIDTH 1024
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16
extern const int field_dim;
extern const size_t field_size;
extern const size_t row_bytes;
// set the grid/block dimensions for kernel execution
extern const dim3 gridDim;
extern const dim3 blocksDim; // 256 threads per block
void cudaCheckError(const char *);
void fill_board(unsigned char *);
void print_board(const unsigned char *);
void cudaCheckError (const char *);
void animate(void (*)(void), const unsigned char *board);
// The two boards -- host only needs one
unsigned char h_current[WIDTH * HEIGHT];
unsigned char *d_current;
unsigned char *d_next;
const dim3 gridDim(8, 8, 1);
const dim3 blocksDim(16, 16, 1); // 256 threads per block
extern "C" __global__ void step (const unsigned char *current, unsigned char *next) {
// coordinates of the cell we're currently evaluating
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// offset index, neighbor coordinates, alive neighbor count
int i, nx, ny, num_neighbors;
const int offsets[8][2] = {{-1, 1},{0, 1},{1, 1},
{-1, 0}, {1, 0},
{-1,-1},{0,-1},{1,-1}};
// count this cell's alive neighbors
num_neighbors = 0;
for (i=0; i<8; i++) {
    // To make the board toroidal, we use modular arithmetic to
// wrap neighbor coordinates around to the other side of the
// board if they fall off.
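    // e.g. with WIDTH == 1024, x == 0 and offset -1: nx = (0 - 1 + 1024) % 1024 = 1023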
nx = (x + offsets[i][0] + WIDTH) % WIDTH;
ny = (y + offsets[i][1] + HEIGHT) % HEIGHT;
num_neighbors += current[ny * WIDTH + nx]==1;
}
// apply the Game of Life rules to this cell
next[y * WIDTH + x] = ((current[y * WIDTH + x] && num_neighbors==2) || num_neighbors==3);
}
void loop_func() {
hipLaunchKernelGGL(( step), dim3(gridDim), dim3(blocksDim), 0, 0, d_current, d_next);
cudaCheckError("kernel execution");
hipMemcpy(h_current, d_next, field_size, hipMemcpyDeviceToHost);
cudaCheckError("Device->Host memcpy");
hipMemcpy(d_current, d_next, field_size, hipMemcpyDeviceToDevice);
cudaCheckError("Device->Device memcpy");
}
int main(void) {
// allocate the device-side field arrays
hipMalloc((void **)&d_current, field_size);
hipMalloc((void **)&d_next, field_size);
cudaCheckError("device memory allocation");
// Initialize the host-side "current".
fill_board(h_current);
// copy host memory to device
hipMemcpy(d_current, h_current, field_size, hipMemcpyHostToDevice);
cudaCheckError("init array host->device copy");
// run the simulation!
animate(loop_func, h_current);
// free device memory
hipFree(d_current);
hipFree(d_next);
return 0;
} | efc23064506967378280387f8df60cc981043263.cu | #include <stdlib.h> // for rand
#include <stdio.h> // for printf
#include <cuda.h> // for CUDA
#include <time.h>
#define P_WIDTH 60
#define P_HEIGHT 30
#define HEIGHT 1024
#define WIDTH 1024
#define BLOCK_WIDTH 16
#define BLOCK_HEIGHT 16
extern const int field_dim;
extern const size_t field_size;
extern const size_t row_bytes;
// set the grid/block dimensions for kernel execution
extern const dim3 gridDim;
extern const dim3 blocksDim; // 256 threads per block
void cudaCheckError(const char *);
void fill_board(unsigned char *);
void print_board(const unsigned char *);
void cudaCheckError (const char *);
void animate(void (*)(void), const unsigned char *board);
// The two boards -- host only needs one
unsigned char h_current[WIDTH * HEIGHT];
unsigned char *d_current;
unsigned char *d_next;
const dim3 gridDim(8, 8, 1);
const dim3 blocksDim(16, 16, 1); // 256 threads per block
extern "C" __global__ void step (const unsigned char *current, unsigned char *next) {
// coordinates of the cell we're currently evaluating
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// offset index, neighbor coordinates, alive neighbor count
int i, nx, ny, num_neighbors;
const int offsets[8][2] = {{-1, 1},{0, 1},{1, 1},
{-1, 0}, {1, 0},
{-1,-1},{0,-1},{1,-1}};
// count this cell's alive neighbors
num_neighbors = 0;
for (i=0; i<8; i++) {
    // To make the board toroidal, we use modular arithmetic to
// wrap neighbor coordinates around to the other side of the
// board if they fall off.
nx = (x + offsets[i][0] + WIDTH) % WIDTH;
ny = (y + offsets[i][1] + HEIGHT) % HEIGHT;
num_neighbors += current[ny * WIDTH + nx]==1;
}
// apply the Game of Life rules to this cell
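  // a cell is alive in the next generation if it has exactly 3 live neighbors,
  // or if it is currently alive and has exactly 2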
next[y * WIDTH + x] = ((current[y * WIDTH + x] && num_neighbors==2) || num_neighbors==3);
}
void loop_func() {
step<<<gridDim, blocksDim>>>(d_current, d_next);
cudaCheckError("kernel execution");
cudaMemcpy(h_current, d_next, field_size, cudaMemcpyDeviceToHost);
cudaCheckError("Device->Host memcpy");
cudaMemcpy(d_current, d_next, field_size, cudaMemcpyDeviceToDevice);
cudaCheckError("Device->Device memcpy");
}
int main(void) {
// allocate the device-side field arrays
cudaMalloc((void **)&d_current, field_size);
cudaMalloc((void **)&d_next, field_size);
cudaCheckError("device memory allocation");
// Initialize the host-side "current".
fill_board(h_current);
// copy host memory to device
cudaMemcpy(d_current, h_current, field_size, cudaMemcpyHostToDevice);
cudaCheckError("init array host->device copy");
// run the simulation!
animate(loop_func, h_current);
// free device memory
cudaFree(d_current);
cudaFree(d_next);
return 0;
} |
6b2c51b54de3edea9646ab7a3e341a1e28fd5348.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMode.hip"
#else
#include <thrust/iterator/constant_iterator.h>
void THCTensor_(calculateMode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
THCudaLongStorage *sortBuffer,
int dimension,
THLongStorage *position) {
THAssert(THCTensor_(isContiguous)(state, input));
// Because the input is contiguous, we want to get a reference to the
// location of the buffer at the innermost dimension that we are going
// to calculate the mode for --> we do this by manually doing the stride
// calculations to get an offset
scalar_t *data = THCTensor_(data)(state, input);
for (int i = 0; i < (position->nbytes() / sizeof(int64_t)); ++i) {
data += THLongStorage_data(position)[i] * THTensor_strideLegacyNoScalars(input, i);
}
int64_t nElement = THCTensor_(sizeLegacyNoScalars)(state, input, THCTensor_(nDimensionLegacyAll)(state, input) - 1);
THCThrustAllocator thrustAlloc(state);
// Wrap input data, sortBuffer, in Thrust device vectors
thrust::device_ptr<scalar_t> vecPtr = thrust::device_pointer_cast(data);
thrust::device_vector<scalar_t> iter(vecPtr, vecPtr + nElement);
thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer));
thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement);
// Fill sortBuffer with [0, 1, 2, ... nElement - 1]
thrust::sequence(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#else
thrust::device,
#endif
seq.begin(), seq.end());
// Sort the input data. The original indices of the data are stored in seq
thrust::sort_by_key(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#else
thrust::device,
#endif
iter.begin(), iter.end(), seq.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfLess()
#endif
);
// Count # of unique elements via an inner product between adjacent elements.
  // Add 1 whenever two neighboring elements are not equal.
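  // e.g. sorted values [1, 1, 2, 3, 3] give adjacent not-equal flags [0, 1, 1, 0],
  // which sum to 2, so unique = 1 + 2 = 3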
int unique = 1 + thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#else
thrust::device,
#endif
iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(),
#if defined(THC_REAL_IS_HALF)
ThrustHalfNotEqualTo()
#else
thrust::not_equal_to<scalar_t>()
#endif
);
// Count frequency of each element
thrust::device_vector<scalar_t> keys(unique);
thrust::device_vector<int> counts(unique);
thrust::reduce_by_key(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#else
thrust::device,
#endif
iter.begin(), iter.end(),
thrust::constant_iterator<int>(1), keys.begin(), counts.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfEqualTo()
#endif
);
// Find index of maximum count
thrust::device_vector<int>::iterator it = thrust::max_element(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#else
thrust::device,
#endif
counts.begin(), counts.end());
scalar_t mode = keys[it - counts.begin()];
// Find first index within which it occurs
#if defined(THC_REAL_IS_HALF)
thrust::device_vector<scalar_t>::iterator positionIter = thrust::find_if(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#else
thrust::device,
#endif
iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode));
#else
thrust::device_vector<scalar_t>::iterator positionIter = thrust::find(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()),
#else
thrust::device,
#endif
iter.begin(), iter.end(), mode);
#endif
THAssert(positionIter != iter.end());
int64_t index = seq[positionIter - iter.begin()];
// Place mode, index in output
ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values);
int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices);
for (int i = 0; i < (position->nbytes() / sizeof(int64_t)); ++i) {
int64_t pos = THLongStorage_data(position)[i];
valuesOffset += THTensor_strideLegacyNoScalars(values, i) * pos;
indicesOffset += THTensor_strideLegacyNoScalars(indices, i) * pos;
}
THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode);
THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index);
}
// this probably could be a loop, not a recursive algorithm
void THCTensor_(dimApplyMode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
THCudaLongStorage *sortBuffer,
int dimension,
THLongStorage *position,
int curDim) {
int64_t ndim = THCTensor_(nDimensionLegacyAll)(state, input);
// Because we have transposed the Tensor, the data for the dimension we are mode'ing along
// is always in the innermost dimension
if (curDim == ndim - 1) {
THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position);
} else {
// Loop through the values and recurse
for (int i = 0; i < THCTensor_(sizeLegacyNoScalars)(state, input, curDim); ++i) {
THLongStorage_data(position)[curDim] = i;
THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1);
}
}
}
#define MAX_GRID_SIZE 65535
#define MAX_BLOCK_SIZE 1024
void THCTensor_(mode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
int dimension,
int keepdim) {
THCTensor *transposed, *contiguous, *valuesTransposed;
THLongStorage *position;
THCudaLongStorage *sortBuffer;
THCudaLongTensor *indicesTransposed;
int64_t ndim, sliceSize, slices;
THAssert(THCTensor_(checkGPU)(state, 1, values));
// Verify they are asking for a valid dimension
ndim = THCTensor_(nDimensionLegacyAll)(state, input);
  THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension out of bounds");
sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dimension);
slices = THCTensor_(nElement)(state, input) / sliceSize;
// Resize output value, index Tensors to appropriate sizes (i.e. the same as
// the input Tensor, except at dim=dimension, the size is 1)
THCTensor_preserveReduceDimSemantics(
state, values, ndim, dimension, keepdim);
THCTensor_preserveReduceDimSemantics(
state, indices, ndim, dimension, keepdim);
std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(input);
dim[dimension] = 1;
THCTensor_(resize)(state, values, dim, {});
THCudaLongTensor_resize(state, indices, dim, {});
// If sliceSize is 1, copy input to values and set indices
if (sliceSize == 1) {
THCTensor_(copy)(state, values, input);
THCudaLongTensor_fill(state, indices, 0);
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
return;
}
// Requirements for fused kernel implementation:
//
// 1. sliceSize <= 2 * max threads per block
// 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for
// a kernel launch
// 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed)
if (sliceSize <= MAX_BLOCK_SIZE &&
slices <= MAX_GRID_SIZE &&
THCTensor_canUse32BitIndexMath(state, input)) {
// Beginning our optimized implementation. First thing we want to do is to transpose
// the input Tensor along the sort dimension, and then make it contiguous
transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1);
contiguous = THCTensor_(newContiguous)(state, transposed);
// We also need to view the values and indices Tensors as transposed in order to
// properly determine the offset into the underlying storage in which to place the
// mode and index for a particular set of dimension values
valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1);
indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1);
// Set-up TensorInfo structs for passing to kernel
TensorInfo<scalar_t, unsigned int> tiValues = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, valuesTransposed);
TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indicesTransposed);
// The number of blocks is the number of slices that we need to calculate the mode for. Each block
// is responsible for computing a single mode
dim3 grid;
THC_getGridFromTiles(slices, grid);
// The blocksize is two elements per thread, rounded up to the nearest power of 2
int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
// Macro that calls kernel --> note that we set the block dimensions here, and
// the amount of shared memory
#define HANDLE_MODE(SIZE) \
{ \
dim3 blockSize(SIZE / 2); \
\
int memsize = (sizeof(scalar_t) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \
hipLaunchKernelGGL(( computeMode<scalar_t, SIZE>) \
, dim3(grid), dim3(blockSize), memsize, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \
}
// Tradeoff between compilation time and the number of specializations. Ideally we would have
// one HANDLE_MODE for each power of 2
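  // e.g. sliceSize = 300 gives ceilPowerOf2 = 512, which falls through to HANDLE_MODE(1024):
  // 512 threads per block and shared memory sized for 1024 elements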
switch(ceilPowerOf2) {
case 2048:
HANDLE_MODE(2048)
break;
case 1024:
case 512:
case 256:
HANDLE_MODE(1024)
break;
case 128:
case 64:
HANDLE_MODE(128)
break;
case 32:
case 16:
case 8:
case 4:
case 2:
HANDLE_MODE(32)
break;
case 1:
default:
assert(false);
}
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, transposed);
THCTensor_(free)(state, contiguous);
THCTensor_(free)(state, valuesTransposed);
THCudaLongTensor_free(state, indicesTransposed);
} else {
// Beginning our naive implementation: We don't want to mutate the input Tensor, but
// we need to be able to sort the inputs along the dimension in order to calculate the
  // mode. Additionally, it's ideal if the data along the dimension is contiguous. So
// we transpose the dimension with the innermost dimension and make a new contiguous
// version that we can use.
transposed = THCTensor_(newClone)(state, input);
THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1);
contiguous = THCTensor_(newContiguous)(state, transposed);
THCTensor_(free)(state, transposed);
// We also need to view the values and indices Tensors as transposed in order to
// properly determine the offset into the underlying storage in which to place the
// mode and index for a particular set of dimension values
valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1);
indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1);
// Position is a Storage that will store the dimension values we are processing
position = THLongStorage_newWithSize(ndim - 1);
// Sort Buffer is a Storage that will be used in the internal sort required to calculate
// the mode efficiently
sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize);
// Call mode
THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0);
THCTensor_(free)(state, contiguous);
THLongStorage_free(position);
THCTensor_(free)(state, valuesTransposed);
THCudaLongTensor_free(state, indicesTransposed);
THCudaLongStorage_free(state, sortBuffer);
}
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
}
#undef MAX_GRID_SIZE
#undef MAX_BLOCK_SIZE
#endif
| 6b2c51b54de3edea9646ab7a3e341a1e28fd5348.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMode.cu"
#else
#include <thrust/iterator/constant_iterator.h>
void THCTensor_(calculateMode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
THCudaLongStorage *sortBuffer,
int dimension,
THLongStorage *position) {
THAssert(THCTensor_(isContiguous)(state, input));
// Because the input is contiguous, we want to get a reference to the
// location of the buffer at the innermost dimension that we are going
// to calculate the mode for --> we do this by manually doing the stride
// calculations to get an offset
scalar_t *data = THCTensor_(data)(state, input);
for (int i = 0; i < (position->nbytes() / sizeof(int64_t)); ++i) {
data += THLongStorage_data(position)[i] * THTensor_strideLegacyNoScalars(input, i);
}
int64_t nElement = THCTensor_(sizeLegacyNoScalars)(state, input, THCTensor_(nDimensionLegacyAll)(state, input) - 1);
THCThrustAllocator thrustAlloc(state);
// Wrap input data, sortBuffer, in Thrust device vectors
thrust::device_ptr<scalar_t> vecPtr = thrust::device_pointer_cast(data);
thrust::device_vector<scalar_t> iter(vecPtr, vecPtr + nElement);
thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer));
thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement);
// Fill sortBuffer with [0, 1, 2, ... nElement - 1]
thrust::sequence(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#else
thrust::device,
#endif
seq.begin(), seq.end());
// Sort the input data. The original indices of the data are stored in seq
thrust::sort_by_key(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#else
thrust::device,
#endif
iter.begin(), iter.end(), seq.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfLess()
#endif
);
// Count # of unique elements via an inner product between adjacent elements.
  // Add 1 whenever two neighboring elements are not equal.
int unique = 1 + thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#else
thrust::device,
#endif
iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(),
#if defined(THC_REAL_IS_HALF)
ThrustHalfNotEqualTo()
#else
thrust::not_equal_to<scalar_t>()
#endif
);
// Count frequency of each element
thrust::device_vector<scalar_t> keys(unique);
thrust::device_vector<int> counts(unique);
thrust::reduce_by_key(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#else
thrust::device,
#endif
iter.begin(), iter.end(),
thrust::constant_iterator<int>(1), keys.begin(), counts.begin()
#if defined(THC_REAL_IS_HALF)
, ThrustHalfEqualTo()
#endif
);
// Find index of maximum count
thrust::device_vector<int>::iterator it = thrust::max_element(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#else
thrust::device,
#endif
counts.begin(), counts.end());
scalar_t mode = keys[it - counts.begin()];
// Find first index within which it occurs
#if defined(THC_REAL_IS_HALF)
thrust::device_vector<scalar_t>::iterator positionIter = thrust::find_if(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#else
thrust::device,
#endif
iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode));
#else
thrust::device_vector<scalar_t>::iterator positionIter = thrust::find(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()),
#else
thrust::device,
#endif
iter.begin(), iter.end(), mode);
#endif
THAssert(positionIter != iter.end());
int64_t index = seq[positionIter - iter.begin()];
// Place mode, index in output
ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values);
int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices);
for (int i = 0; i < (position->nbytes() / sizeof(int64_t)); ++i) {
int64_t pos = THLongStorage_data(position)[i];
valuesOffset += THTensor_strideLegacyNoScalars(values, i) * pos;
indicesOffset += THTensor_strideLegacyNoScalars(indices, i) * pos;
}
THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode);
THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index);
}
// this probably could be a loop, not a recursive algorithm
void THCTensor_(dimApplyMode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
THCudaLongStorage *sortBuffer,
int dimension,
THLongStorage *position,
int curDim) {
int64_t ndim = THCTensor_(nDimensionLegacyAll)(state, input);
// Because we have transposed the Tensor, the data for the dimension we are mode'ing along
// is always in the innermost dimension
if (curDim == ndim - 1) {
THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position);
} else {
// Loop through the values and recurse
for (int i = 0; i < THCTensor_(sizeLegacyNoScalars)(state, input, curDim); ++i) {
THLongStorage_data(position)[curDim] = i;
THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1);
}
}
}
#define MAX_GRID_SIZE 65535
#define MAX_BLOCK_SIZE 1024
void THCTensor_(mode)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *input,
int dimension,
int keepdim) {
THCTensor *transposed, *contiguous, *valuesTransposed;
THLongStorage *position;
THCudaLongStorage *sortBuffer;
THCudaLongTensor *indicesTransposed;
int64_t ndim, sliceSize, slices;
THAssert(THCTensor_(checkGPU)(state, 1, values));
// Verify they are asking for a valid dimension
ndim = THCTensor_(nDimensionLegacyAll)(state, input);
  THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension out of bounds");
sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dimension);
slices = THCTensor_(nElement)(state, input) / sliceSize;
// Resize output value, index Tensors to appropriate sizes (i.e. the same as
// the input Tensor, except at dim=dimension, the size is 1)
THCTensor_preserveReduceDimSemantics(
state, values, ndim, dimension, keepdim);
THCTensor_preserveReduceDimSemantics(
state, indices, ndim, dimension, keepdim);
std::vector<int64_t> dim = THTensor_sizesLegacyNoScalars(input);
dim[dimension] = 1;
THCTensor_(resize)(state, values, dim, {});
THCudaLongTensor_resize(state, indices, dim, {});
// If sliceSize is 1, copy input to values and set indices
if (sliceSize == 1) {
THCTensor_(copy)(state, values, input);
THCudaLongTensor_fill(state, indices, 0);
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
return;
}
// Requirements for fused kernel implementation:
//
// 1. sliceSize <= 2 * max threads per block
// 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for
// a kernel launch
// 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed)
if (sliceSize <= MAX_BLOCK_SIZE &&
slices <= MAX_GRID_SIZE &&
THCTensor_canUse32BitIndexMath(state, input)) {
// Beginning our optimized implementation. First thing we want to do is to transpose
// the input Tensor along the sort dimension, and then make it contiguous
transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1);
contiguous = THCTensor_(newContiguous)(state, transposed);
// We also need to view the values and indices Tensors as transposed in order to
// properly determine the offset into the underlying storage in which to place the
// mode and index for a particular set of dimension values
valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1);
indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1);
// Set-up TensorInfo structs for passing to kernel
TensorInfo<scalar_t, unsigned int> tiValues = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, valuesTransposed);
TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indicesTransposed);
// The number of blocks is the number of slices that we need to calculate the mode for. Each block
// is responsible for computing a single mode
dim3 grid;
THC_getGridFromTiles(slices, grid);
// The blocksize is two elements per thread, rounded up to the nearest power of 2
int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
// Macro that calls kernel --> note that we set the block dimensions here, and
// the amount of shared memory
#define HANDLE_MODE(SIZE) \
{ \
dim3 blockSize(SIZE / 2); \
\
int memsize = (sizeof(scalar_t) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \
computeMode<scalar_t, SIZE> \
<<<grid, blockSize, memsize, c10::cuda::getCurrentCUDAStream()>>>( \
THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \
}
// Tradeoff between compilation time and the number of specializations. Ideally we would have
// one HANDLE_MODE for each power of 2
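  // e.g. sliceSize = 1500 gives ceilPowerOf2 = 2048, so HANDLE_MODE(2048) launches
  // 1024 threads per block with shared memory sized for 2048 elements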
switch(ceilPowerOf2) {
case 2048:
HANDLE_MODE(2048)
break;
case 1024:
case 512:
case 256:
HANDLE_MODE(1024)
break;
case 128:
case 64:
HANDLE_MODE(128)
break;
case 32:
case 16:
case 8:
case 4:
case 2:
HANDLE_MODE(32)
break;
case 1:
default:
assert(false);
}
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, transposed);
THCTensor_(free)(state, contiguous);
THCTensor_(free)(state, valuesTransposed);
THCudaLongTensor_free(state, indicesTransposed);
} else {
// Beginning our naive implementation: We don't want to mutate the input Tensor, but
// we need to be able to sort the inputs along the dimension in order to calculate the
  // mode. Additionally, it's ideal if the data along the dimension is contiguous. So
// we transpose the dimension with the innermost dimension and make a new contiguous
// version that we can use.
transposed = THCTensor_(newClone)(state, input);
THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1);
contiguous = THCTensor_(newContiguous)(state, transposed);
THCTensor_(free)(state, transposed);
// We also need to view the values and indices Tensors as transposed in order to
// properly determine the offset into the underlying storage in which to place the
// mode and index for a particular set of dimension values
valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1);
indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1);
// Position is a Storage that will store the dimension values we are processing
position = THLongStorage_newWithSize(ndim - 1);
// Sort Buffer is a Storage that will be used in the internal sort required to calculate
// the mode efficiently
sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize);
// Call mode
THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0);
THCTensor_(free)(state, contiguous);
THLongStorage_free(position);
THCTensor_(free)(state, valuesTransposed);
THCudaLongTensor_free(state, indicesTransposed);
THCudaLongStorage_free(state, sortBuffer);
}
if (!keepdim) {
THCTensor_(squeeze1d)(state, values, values, dimension);
THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
}
}
#undef MAX_GRID_SIZE
#undef MAX_BLOCK_SIZE
#endif
|
8d5057600a2eaaa9840d4e230b8becf0a238eefe.hip | // !!! This is a file automatically generated by hipify!!!
#include "jacketSDK.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include <iostream>
#define TPB 16
__global__ void ldc_D2Q9_LBGK_ts(float * fOut, float * fIn, float * U,
float * rho_p, const float u_bc,
const float omega,
const int Nx, const int Ny){
int X=threadIdx.x+blockIdx.x*blockDim.x;
int Y=threadIdx.y+blockIdx.y*blockDim.y;
if((X<Nx)&&(Y<Ny)){
int tid=X+Y*Nx;
int nnodes=Nx*Ny;
float fi1,fi2,fi3,fi4,fi5,fi6,fi7,fi8,fi9;
float fe_tmp;
float fo1,fo2,fo3,fo4,fo5,fo6,fo7,fo8,fo9;
//get the density data for the lattice point.
fi1=fIn[tid];
fi2=fIn[nnodes+tid];
fi3=fIn[2*nnodes+tid];
fi4=fIn[3*nnodes+tid];
fi5=fIn[4*nnodes+tid];
fi6=fIn[5*nnodes+tid];
fi7=fIn[6*nnodes+tid];
fi8=fIn[7*nnodes+tid];
fi9=fIn[8*nnodes+tid];
//compute rho
float rho = fi1+fi2+fi3+fi4+fi5+fi6+fi7+fi8+fi9;
rho_p[tid]=rho;
//compute velocity
float ux = (1/rho)*(fi2+fi6+fi9 - (fi7+fi4+fi8));
float uy = (1/rho)*(fi6+fi3+fi7 - (fi8+fi5+fi9));
U[tid]=sqrt(ux*ux+uy*uy);
if((Y==(Ny-1))&&(!((X==0)||(X==(Nx-1))))){//lid node...
ux = u_bc;
uy = 0.0;
rho = (1./(1.+uy))*
((fi1+fi2+fi4)+2.0*(fi3+fi6+fi7));
fi5 = fi3-(2./3.)*rho*uy;
fi9 = fi7+0.5*(fi4-fi2)+
0.5*rho*ux - (1./6.)*rho*uy;
fi8=fi6+0.5*(fi2-fi4)-0.5*rho*ux -
(1./6.)*rho*uy;
}
//compute feq and collide...do it one velocity at a time.
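    // BGK collision: f_out = f_in - omega*(f_in - feq), with the D2Q9 equilibrium
    // feq_i = w_i*rho*(1 + 3(c_i.u) + 4.5*(c_i.u)^2 - 1.5*|u|^2); cu below holds 3*(c_i.u)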
//speed 1
float w = 4./9.;
float cu = 0.;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo1 = fi1-omega*(fi1-fe_tmp);
//speed 2
w = 1./9.;
cu = 3.0*ux;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo2 = fi2-omega*(fi2-fe_tmp);
//speed 3
cu = 3.0*uy;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo3 = fi3-omega*(fi3-fe_tmp);
//speed 4
cu = -3.0*ux;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo4=fi4-omega*(fi4-fe_tmp);
//speed 5
cu = -3.0*uy;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo5=fi5-omega*(fi5-fe_tmp);
//speed 6
w = 1./36.;
cu = 3.0*(ux+uy);
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo6 = fi6-omega*(fi6-fe_tmp);
//speed 7
cu = 3.0*(-ux+uy);
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo7=fi7-omega*(fi7-fe_tmp);
//speed 8
cu = 3.0*(-ux-uy);
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo8=fi8-omega*(fi8-fe_tmp);
//speed 9
cu= 3.0*(ux-uy);
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo9=fi9-omega*(fi9-fe_tmp);
if(((X==0)||(X==(Nx-1))||(Y==0))){//solid node
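      // full bounce-back at solid walls: every incoming distribution is returned
      // along the opposite lattice direction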
fo1=fi1;
fo2=fi4; fo4=fi2;
fo3=fi5; fo5=fi3;
fo6=fi8; fo8=fi6;
fo7=fi9; fo9=fi7;
ux = 0.; uy = 0.;
}
// stream the result...
//compute the local stream vector...
int x;
int y;
int yn;
int ys;
int xe;
int xw;
//int dir;
int dof_num; //int f_num;
x = tid%Nx+1;
y = ((tid+1)-x+1)/Nx + 1;
yn = y%Ny+1;
xe = x%Nx+1;
if(y==1){
ys = Ny;
}else{
ys = y-1;
}
if(x==1){
xw=Nx;
}else{
xw=x-1;
}
dof_num = Nx*(y-1)+x;
fOut[dof_num-1]=fo1;
dof_num=Nx*(y-1)+xe;
fOut[nnodes+dof_num-1]=fo2;
dof_num=Nx*(yn-1)+x;
fOut[2*nnodes+dof_num-1]=fo3;
dof_num=Nx*(y-1)+xw;
fOut[3*nnodes+dof_num-1]=fo4;
dof_num=Nx*(ys-1)+x;
fOut[4*nnodes+dof_num-1]=fo5;
dof_num=Nx*(yn-1)+xe;
fOut[5*nnodes+dof_num-1]=fo6;
dof_num=Nx*(yn-1)+xw;
fOut[6*nnodes+dof_num-1]=fo7;
dof_num=Nx*(ys-1)+xw;
fOut[7*nnodes+dof_num-1]=fo8;
dof_num=Nx*(ys-1)+xe;
fOut[8*nnodes+dof_num-1]=fo9;
}//if(X<Nx...
}
err_t jktFunction(int nlhs,mxArray * plhs[], int nrhs,mxArray * prhs[]){
mxArray * m_fOut=prhs[0];
mxArray * m_fIn=prhs[1];
mxArray * m_U = prhs[2];
mxArray * m_rho =prhs[3];
float u_bc=mxGetScalar(prhs[4]);
float omega=mxGetScalar(prhs[5]);
int Nx = mxGetScalar(prhs[6]);
int Ny =mxGetScalar(prhs[7]);
float * fOut_d;
float * fIn_d;
float * U_d;
float * rho_d;
jkt_mem((void**)&fOut_d,m_fOut);
jkt_mem((void**)&fIn_d,m_fIn);
jkt_mem((void**)&U_d,m_U);
jkt_mem((void**)&rho_d,m_rho);
dim3 BLOCKS(TPB,TPB,1);
dim3 GRIDS((Nx+TPB-1)/TPB,(Ny+TPB-1)/TPB,1);
hipLaunchKernelGGL(( ldc_D2Q9_LBGK_ts), dim3(GRIDS),dim3(BLOCKS), 0, 0, fOut_d,fIn_d,U_d,rho_d,u_bc,omega,Nx,Ny);
return errNone;
}
| 8d5057600a2eaaa9840d4e230b8becf0a238eefe.cu | #include "jacketSDK.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_functions.h"
#include <iostream>
#define TPB 16
__global__ void ldc_D2Q9_LBGK_ts(float * fOut, float * fIn, float * U,
float * rho_p, const float u_bc,
const float omega,
const int Nx, const int Ny){
int X=threadIdx.x+blockIdx.x*blockDim.x;
int Y=threadIdx.y+blockIdx.y*blockDim.y;
if((X<Nx)&&(Y<Ny)){
int tid=X+Y*Nx;
int nnodes=Nx*Ny;
float fi1,fi2,fi3,fi4,fi5,fi6,fi7,fi8,fi9;
float fe_tmp;
float fo1,fo2,fo3,fo4,fo5,fo6,fo7,fo8,fo9;
//get the density data for the lattice point.
fi1=fIn[tid];
fi2=fIn[nnodes+tid];
fi3=fIn[2*nnodes+tid];
fi4=fIn[3*nnodes+tid];
fi5=fIn[4*nnodes+tid];
fi6=fIn[5*nnodes+tid];
fi7=fIn[6*nnodes+tid];
fi8=fIn[7*nnodes+tid];
fi9=fIn[8*nnodes+tid];
//compute rho
float rho = fi1+fi2+fi3+fi4+fi5+fi6+fi7+fi8+fi9;
rho_p[tid]=rho;
//compute velocity
float ux = (1/rho)*(fi2+fi6+fi9 - (fi7+fi4+fi8));
float uy = (1/rho)*(fi6+fi3+fi7 - (fi8+fi5+fi9));
U[tid]=sqrt(ux*ux+uy*uy);
if((Y==(Ny-1))&&(!((X==0)||(X==(Nx-1))))){//lid node...
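      // moving-lid velocity boundary: impose u = (u_bc, 0) and reconstruct the three
      // unknown distributions entering the domain (fi5, fi8, fi9) from the known ones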
ux = u_bc;
uy = 0.0;
rho = (1./(1.+uy))*
((fi1+fi2+fi4)+2.0*(fi3+fi6+fi7));
fi5 = fi3-(2./3.)*rho*uy;
fi9 = fi7+0.5*(fi4-fi2)+
0.5*rho*ux - (1./6.)*rho*uy;
fi8=fi6+0.5*(fi2-fi4)-0.5*rho*ux -
(1./6.)*rho*uy;
}
//compute feq and collide...do it one velocity at a time.
//speed 1
float w = 4./9.;
float cu = 0.;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo1 = fi1-omega*(fi1-fe_tmp);
//speed 2
w = 1./9.;
cu = 3.0*ux;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo2 = fi2-omega*(fi2-fe_tmp);
//speed 3
cu = 3.0*uy;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo3 = fi3-omega*(fi3-fe_tmp);
//speed 4
cu = -3.0*ux;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo4=fi4-omega*(fi4-fe_tmp);
//speed 5
cu = -3.0*uy;
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo5=fi5-omega*(fi5-fe_tmp);
//speed 6
w = 1./36.;
cu = 3.0*(ux+uy);
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo6 = fi6-omega*(fi6-fe_tmp);
//speed 7
cu = 3.0*(-ux+uy);
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo7=fi7-omega*(fi7-fe_tmp);
//speed 8
cu = 3.0*(-ux-uy);
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo8=fi8-omega*(fi8-fe_tmp);
//speed 9
cu= 3.0*(ux-uy);
fe_tmp = w*rho*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux + uy*uy));
fo9=fi9-omega*(fi9-fe_tmp);
if(((X==0)||(X==(Nx-1))||(Y==0))){//solid node
fo1=fi1;
fo2=fi4; fo4=fi2;
fo3=fi5; fo5=fi3;
fo6=fi8; fo8=fi6;
fo7=fi9; fo9=fi7;
ux = 0.; uy = 0.;
}
// stream the result...
//compute the local stream vector...
int x;
int y;
int yn;
int ys;
int xe;
int xw;
//int dir;
int dof_num; //int f_num;
x = tid%Nx+1;
y = ((tid+1)-x+1)/Nx + 1;
yn = y%Ny+1;
xe = x%Nx+1;
if(y==1){
ys = Ny;
}else{
ys = y-1;
}
if(x==1){
xw=Nx;
}else{
xw=x-1;
}
dof_num = Nx*(y-1)+x;
fOut[dof_num-1]=fo1;
dof_num=Nx*(y-1)+xe;
fOut[nnodes+dof_num-1]=fo2;
dof_num=Nx*(yn-1)+x;
fOut[2*nnodes+dof_num-1]=fo3;
dof_num=Nx*(y-1)+xw;
fOut[3*nnodes+dof_num-1]=fo4;
dof_num=Nx*(ys-1)+x;
fOut[4*nnodes+dof_num-1]=fo5;
dof_num=Nx*(yn-1)+xe;
fOut[5*nnodes+dof_num-1]=fo6;
dof_num=Nx*(yn-1)+xw;
fOut[6*nnodes+dof_num-1]=fo7;
dof_num=Nx*(ys-1)+xw;
fOut[7*nnodes+dof_num-1]=fo8;
dof_num=Nx*(ys-1)+xe;
fOut[8*nnodes+dof_num-1]=fo9;
}//if(X<Nx...
}
err_t jktFunction(int nlhs,mxArray * plhs[], int nrhs,mxArray * prhs[]){
mxArray * m_fOut=prhs[0];
mxArray * m_fIn=prhs[1];
mxArray * m_U = prhs[2];
mxArray * m_rho =prhs[3];
float u_bc=mxGetScalar(prhs[4]);
float omega=mxGetScalar(prhs[5]);
int Nx = mxGetScalar(prhs[6]);
int Ny =mxGetScalar(prhs[7]);
float * fOut_d;
float * fIn_d;
float * U_d;
float * rho_d;
jkt_mem((void**)&fOut_d,m_fOut);
jkt_mem((void**)&fIn_d,m_fIn);
jkt_mem((void**)&U_d,m_U);
jkt_mem((void**)&rho_d,m_rho);
dim3 BLOCKS(TPB,TPB,1);
dim3 GRIDS((Nx+TPB-1)/TPB,(Ny+TPB-1)/TPB,1);
ldc_D2Q9_LBGK_ts<<<GRIDS,BLOCKS>>>(fOut_d,fIn_d,U_d,rho_d,u_bc,omega,Nx,Ny);
return errNone;
}
|
3337a9c766055bd203e7366e1ff1f0a65a2b6e94.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
//
NVStrings* NVStrings::lower()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
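    // two-pass scheme: first measure the UTF-8 size of each lower-cased string, exclusive-scan
    // the sizes into offsets, then write the converted strings into a single packed buffer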
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int chw = custring_view::bytes_in_char(chr);
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_UPPER(flg) )
chw = custring_view::bytes_in_char(u2u8(d_cases[uni]));
bytes += chw;
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_UPPER(flg) )
chr = u2u8(d_cases[uni]);
unsigned int chw = custring_view::Char_to_char(chr,ptr);
ptr += chw;
bytes += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
}
});
//
return rtn;
}
//
NVStrings* NVStrings::upper()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int chw = custring_view::bytes_in_char(chr);
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_LOWER(flg) )
chw = custring_view::bytes_in_char(u2u8(d_cases[uni]));
bytes += chw;
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_LOWER(flg) )
chr = u2u8(d_cases[uni]);
int chw = custring_view::Char_to_char(chr,ptr);
ptr += chw;
bytes += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
});
//
return rtn;
}
//
NVStrings* NVStrings::swapcase()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int chw = custring_view::bytes_in_char(chr);
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_LOWER(flg) || IS_UPPER(flg) )
chw = custring_view::bytes_in_char(u2u8(d_cases[uni]));
bytes += chw;
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_LOWER(flg) || IS_UPPER(flg) )
chr = u2u8(d_cases[uni]);
int chw = custring_view::Char_to_char(chr,ptr);
ptr += chw;
bytes += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
}
});
//
return rtn;
}
//
NVStrings* NVStrings::capitalize()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int chw = custring_view::bytes_in_char(chr);
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( (bytes && IS_UPPER(flg)) || (!bytes && IS_LOWER(flg)) )
{
                    uni = (uni <= 0x00FFFF ? d_cases[uni] : uni);
chr = u2u8(uni);
chw = custring_view::bytes_in_char(chr);
}
bytes += chw;
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( (bytes && IS_UPPER(flg)) || (!bytes && IS_LOWER(flg)) )
{
                    uni = (uni <= 0x00FFFF ? d_cases[uni] : uni);
chr = u2u8(uni);
}
unsigned int chw = custring_view::Char_to_char(chr,ptr);
ptr += chw;
bytes += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
}
});
//
return rtn;
}
// returns titlecase for each string
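// (the first alphabetic character of each word is upper-cased, subsequent characters lower-cased)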
NVStrings* NVStrings::title()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
int bytes = 0;
bool bcapnext = true;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( !IS_ALPHA(flg) )
{
bcapnext = true;
bytes += custring_view::bytes_in_char(chr);
continue;
}
if( (bcapnext && IS_LOWER(flg)) || (!bcapnext && IS_UPPER(flg)) )
uni = (unsigned int)(uni <= 0x00FFFF ? d_cases[uni] : uni);
bcapnext = false;
bytes += custring_view::bytes_in_char(u2u8(uni));
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the title thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
int bytes = 0;
bool bcapnext = true;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( !IS_ALPHA(flg) )
bcapnext = true;
else
{
if( (bcapnext && IS_LOWER(flg)) || (!bcapnext && IS_UPPER(flg)) )
{
uni = (unsigned int)(uni <= 0x00FFFF ? d_cases[uni] : uni);
chr = u2u8(uni);
}
bcapnext = false;
}
int chw = custring_view::Char_to_char(chr,ptr);
bytes += chw;
ptr += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
}
});
//
return rtn;
}
| 3337a9c766055bd203e7366e1ff1f0a65a2b6e94.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
//
NVStrings* NVStrings::lower()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int chw = custring_view::bytes_in_char(chr);
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_UPPER(flg) )
chw = custring_view::bytes_in_char(u2u8(d_cases[uni]));
bytes += chw;
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_UPPER(flg) )
chr = u2u8(d_cases[uni]);
unsigned int chw = custring_view::Char_to_char(chr,ptr);
ptr += chw;
bytes += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
}
});
//
return rtn;
}
//
NVStrings* NVStrings::upper()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int chw = custring_view::bytes_in_char(chr);
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_LOWER(flg) )
chw = custring_view::bytes_in_char(u2u8(d_cases[uni]));
bytes += chw;
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
size_t* d_offsets = offsets.data().get();
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_LOWER(flg) )
chr = u2u8(d_cases[uni]);
int chw = custring_view::Char_to_char(chr,ptr);
ptr += chw;
bytes += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
});
//
return rtn;
}
//
NVStrings* NVStrings::swapcase()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int chw = custring_view::bytes_in_char(chr);
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_LOWER(flg) || IS_UPPER(flg) )
chw = custring_view::bytes_in_char(u2u8(d_cases[uni]));
bytes += chw;
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( IS_LOWER(flg) || IS_UPPER(flg) )
chr = u2u8(d_cases[uni]);
int chw = custring_view::Char_to_char(chr,ptr);
ptr += chw;
bytes += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
}
});
//
return rtn;
}
//
NVStrings* NVStrings::capitalize()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int chw = custring_view::bytes_in_char(chr);
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( (bytes && IS_UPPER(flg)) || (!bytes && IS_LOWER(flg)) )
{
                    uni = (uni <= 0x00FFFF ? d_cases[uni] : uni);
chr = u2u8(uni);
chw = custring_view::bytes_in_char(chr);
}
bytes += chw;
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
unsigned int bytes = 0;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( (bytes && IS_UPPER(flg)) || (!bytes && IS_LOWER(flg)) )
{
                    uni = (uni <= 0x00FFFF ? d_cases[uni] : uni);
chr = u2u8(uni);
}
unsigned int chw = custring_view::Char_to_char(chr,ptr);
ptr += chw;
bytes += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
}
});
//
return rtn;
}
// returns titlecase for each string
NVStrings* NVStrings::title()
{
unsigned int count = size();
if( count==0 )
return new NVStrings(0);
auto execpol = rmm::exec_policy(0);
custring_view_array d_strings = pImpl->getStringsPtr();
unsigned char* d_flags = get_unicode_flags();
unsigned short* d_cases = get_charcases();
// compute size of output buffer
rmm::device_vector<size_t> lengths(count,0);
size_t* d_lengths = lengths.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_lengths] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
int bytes = 0;
bool bcapnext = true;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( !IS_ALPHA(flg) )
{
bcapnext = true;
bytes += custring_view::bytes_in_char(chr);
continue;
}
if( (bcapnext && IS_LOWER(flg)) || (!bcapnext && IS_UPPER(flg)) )
uni = (unsigned int)(uni <= 0x00FFFF ? d_cases[uni] : uni);
bcapnext = false;
bytes += custring_view::bytes_in_char(u2u8(uni));
}
d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size(bytes,dstr->chars_count()));
}
});
// create output object
NVStrings* rtn = new NVStrings(count);
char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths);
if( d_buffer==0 )
return rtn;
// create offsets
rmm::device_vector<size_t> offsets(count,0);
thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin());
// do the title thing
custring_view_array d_results = rtn->pImpl->getStringsPtr();
size_t* d_offsets = offsets.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_cases, d_buffer, d_offsets, d_results] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
{
char* buffer = d_buffer + d_offsets[idx];
char* ptr = buffer;
int bytes = 0;
bool bcapnext = true;
for( auto itr = dstr->begin(); (itr != dstr->end()); itr++ )
{
Char chr = *itr;
unsigned int uni = u82u(chr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
if( !IS_ALPHA(flg) )
bcapnext = true;
else
{
if( (bcapnext && IS_LOWER(flg)) || (!bcapnext && IS_UPPER(flg)) )
{
uni = (unsigned int)(uni <= 0x00FFFF ? d_cases[uni] : uni);
chr = u2u8(uni);
}
bcapnext = false;
}
int chw = custring_view::Char_to_char(chr,ptr);
bytes += chw;
ptr += chw;
}
d_results[idx] = custring_view::create_from(buffer,buffer,bytes);
}
});
//
return rtn;
}
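// Note: the case-mapping methods above all follow the same two-pass pattern: a first
// for_each_n computes the number of bytes each converted string will need, an
// exclusive_scan over those sizes yields per-string offsets into a single output
// buffer, and a second for_each_n re-walks the characters and writes the converted
// bytes at buffer + offset. The d_flags/d_cases tables are only consulted for code
// points <= 0xFFFF; characters outside that range pass through unchanged.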
|
58895c8ab0dd5b37bad2a485af80482d135a1648.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/math_unary_elementwise_func.h"
namespace oneflow {
namespace {
template<template<typename> class UnaryFunctor, typename T>
__global__ void MathUnaryElementwiseForwardGpu(const int n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = UnaryFunctor<T>::Forward(x[i]); }
}
template<template<typename> class UnaryFunctor, typename T>
__global__ void MathUnaryElementwiseBackwardGpu(const int n, const T* x, const T* dy, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = UnaryFunctor<T>::Backward(x[i], dy[i]); }
}
} // namespace
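// CUDA_1D_KERNEL_LOOP comes from OneFlow's CUDA utilities and is a grid-stride loop.
// A sketch of its usual definition, shown only for reference (assumption: the real
// macro may differ in minor details):
//   #define CUDA_1D_KERNEL_LOOP(i, n)                                  \
//     for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);     \
//          i += blockDim.x * gridDim.x)
// so each thread processes elements i, i + blockDim.x * gridDim.x, ... of the buffer.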
template<template<typename> class UnaryFunctor, typename T>
class MathUnaryElementwiseGpuKernel final : public user_op::OpKernel {
public:
MathUnaryElementwiseGpuKernel() = default;
~MathUnaryElementwiseGpuKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0);
const T* x = tensor_x->dptr<T>();
T* y = tensor_y->mut_dptr<T>();
int64_t n = tensor_x->shape().elem_cnt();
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
hipLaunchKernelGGL(( MathUnaryElementwiseForwardGpu<UnaryFunctor, T>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, x, y);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<template<typename> class UnaryFunctor, typename T>
class MathUnaryElementwiseGradGpuKernel final : public user_op::OpKernel {
public:
MathUnaryElementwiseGradGpuKernel() = default;
~MathUnaryElementwiseGradGpuKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* tensor_dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* tensor_dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const T* x = tensor_x->dptr<T>();
const T* dy = tensor_dy->dptr<T>();
T* dx = tensor_dx->mut_dptr<T>();
int64_t n = tensor_x->shape().elem_cnt();
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
hipLaunchKernelGGL(( MathUnaryElementwiseBackwardGpu<UnaryFunctor, T>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, x, dy, dx);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MATH_UNARY_ELEMENTWISE_GPU_KERNEL_AND_GRAD(math_type_pair, data_type_pair) \
REGISTER_USER_KERNEL(OF_PP_PAIR_FIRST(math_type_pair)) \
.SetCreateFn< \
MathUnaryElementwiseGpuKernel<OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \
OF_PP_PAIR_FIRST(data_type_pair)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair)) \
& (user_op::HobDataType("y", 0) == OF_PP_PAIR_SECOND(data_type_pair))); \
\
REGISTER_USER_KERNEL((std::string("") + OF_PP_PAIR_FIRST(math_type_pair) + "_grad")) \
.SetCreateFn< \
MathUnaryElementwiseGradGpuKernel<OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \
OF_PP_PAIR_FIRST(data_type_pair)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair)));
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_MATH_UNARY_ELEMENTWISE_GPU_KERNEL_AND_GRAD,
MATH_UNARY_ELEMENTWISE_FUNC_SEQ, FLOATING_DATA_TYPE_SEQ)
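// The OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE above expands the registration macro once per
// (unary math op, floating data type) combination, so every op listed in
// MATH_UNARY_ELEMENTWISE_FUNC_SEQ gets a forward and a "_grad" GPU kernel for each
// type in FLOATING_DATA_TYPE_SEQ (typically float and double).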
template<template<typename> class UnaryFunctor>
class MathUnaryElementwiseGpuHalfKernel final : public user_op::OpKernel {
public:
MathUnaryElementwiseGpuHalfKernel() = default;
~MathUnaryElementwiseGpuHalfKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0);
const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>());
half* y = reinterpret_cast<half*>(tensor_y->mut_dptr<float16>());
int64_t n = tensor_x->shape().elem_cnt();
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
hipLaunchKernelGGL(( MathUnaryElementwiseForwardGpu<UnaryFunctor, half>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, x, y);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<template<typename> class UnaryFunctor>
class MathUnaryElementwiseGradGpuHalfKernel final : public user_op::OpKernel {
public:
MathUnaryElementwiseGradGpuHalfKernel() = default;
~MathUnaryElementwiseGradGpuHalfKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* tensor_dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* tensor_dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>());
const half* dy = reinterpret_cast<const half*>(tensor_dy->dptr<float16>());
half* dx = reinterpret_cast<half*>(tensor_dx->mut_dptr<float16>());
int64_t n = tensor_x->shape().elem_cnt();
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
hipLaunchKernelGGL(( MathUnaryElementwiseBackwardGpu<UnaryFunctor, half>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, x, dy, dx);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MATH_UNARY_ELEMENTWISE_GPU_HALF_KERNEL_AND_GRAD(math_type_str, math_func_prefix) \
REGISTER_USER_KERNEL(math_type_str) \
.SetCreateFn<MathUnaryElementwiseGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == DataType::kFloat16) \
& (user_op::HobDataType("y", 0) == DataType::kFloat16)); \
\
REGISTER_USER_KERNEL((std::string("") + math_type_str + "_grad")) \
.SetCreateFn<MathUnaryElementwiseGradGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == DataType::kFloat16));
OF_PP_FOR_EACH_TUPLE(REGISTER_MATH_UNARY_ELEMENTWISE_GPU_HALF_KERNEL_AND_GRAD,
MATH_UNARY_ELEMENTWISE_FUNC_SEQ)
} // namespace oneflow
| 58895c8ab0dd5b37bad2a485af80482d135a1648.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/math_unary_elementwise_func.h"
namespace oneflow {
namespace {
template<template<typename> class UnaryFunctor, typename T>
__global__ void MathUnaryElementwiseForwardGpu(const int n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = UnaryFunctor<T>::Forward(x[i]); }
}
template<template<typename> class UnaryFunctor, typename T>
__global__ void MathUnaryElementwiseBackwardGpu(const int n, const T* x, const T* dy, T* dx) {
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = UnaryFunctor<T>::Backward(x[i], dy[i]); }
}
} // namespace
template<template<typename> class UnaryFunctor, typename T>
class MathUnaryElementwiseGpuKernel final : public user_op::OpKernel {
public:
MathUnaryElementwiseGpuKernel() = default;
~MathUnaryElementwiseGpuKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0);
const T* x = tensor_x->dptr<T>();
T* y = tensor_y->mut_dptr<T>();
int64_t n = tensor_x->shape().elem_cnt();
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
MathUnaryElementwiseForwardGpu<UnaryFunctor, T>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, x, y);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<template<typename> class UnaryFunctor, typename T>
class MathUnaryElementwiseGradGpuKernel final : public user_op::OpKernel {
public:
MathUnaryElementwiseGradGpuKernel() = default;
~MathUnaryElementwiseGradGpuKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* tensor_dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* tensor_dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const T* x = tensor_x->dptr<T>();
const T* dy = tensor_dy->dptr<T>();
T* dx = tensor_dx->mut_dptr<T>();
int64_t n = tensor_x->shape().elem_cnt();
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
MathUnaryElementwiseBackwardGpu<UnaryFunctor, T>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, x, dy, dx);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MATH_UNARY_ELEMENTWISE_GPU_KERNEL_AND_GRAD(math_type_pair, data_type_pair) \
REGISTER_USER_KERNEL(OF_PP_PAIR_FIRST(math_type_pair)) \
.SetCreateFn< \
MathUnaryElementwiseGpuKernel<OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \
OF_PP_PAIR_FIRST(data_type_pair)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair)) \
& (user_op::HobDataType("y", 0) == OF_PP_PAIR_SECOND(data_type_pair))); \
\
REGISTER_USER_KERNEL((std::string("") + OF_PP_PAIR_FIRST(math_type_pair) + "_grad")) \
.SetCreateFn< \
MathUnaryElementwiseGradGpuKernel<OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \
OF_PP_PAIR_FIRST(data_type_pair)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair)));
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_MATH_UNARY_ELEMENTWISE_GPU_KERNEL_AND_GRAD,
MATH_UNARY_ELEMENTWISE_FUNC_SEQ, FLOATING_DATA_TYPE_SEQ)
template<template<typename> class UnaryFunctor>
class MathUnaryElementwiseGpuHalfKernel final : public user_op::OpKernel {
public:
MathUnaryElementwiseGpuHalfKernel() = default;
~MathUnaryElementwiseGpuHalfKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0);
const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>());
half* y = reinterpret_cast<half*>(tensor_y->mut_dptr<float16>());
int64_t n = tensor_x->shape().elem_cnt();
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
MathUnaryElementwiseForwardGpu<UnaryFunctor, half>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, x, y);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
template<template<typename> class UnaryFunctor>
class MathUnaryElementwiseGradGpuHalfKernel final : public user_op::OpKernel {
public:
MathUnaryElementwiseGradGpuHalfKernel() = default;
~MathUnaryElementwiseGradGpuHalfKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* tensor_dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* tensor_dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>());
const half* dy = reinterpret_cast<const half*>(tensor_dy->dptr<float16>());
half* dx = reinterpret_cast<half*>(tensor_dx->mut_dptr<float16>());
int64_t n = tensor_x->shape().elem_cnt();
CHECK_LE(n, GetMaxVal<int32_t>() / 2);
if (n == 0) { return; }
MathUnaryElementwiseBackwardGpu<UnaryFunctor, half>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, x, dy, dx);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MATH_UNARY_ELEMENTWISE_GPU_HALF_KERNEL_AND_GRAD(math_type_str, math_func_prefix) \
REGISTER_USER_KERNEL(math_type_str) \
.SetCreateFn<MathUnaryElementwiseGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == DataType::kFloat16) \
& (user_op::HobDataType("y", 0) == DataType::kFloat16)); \
\
REGISTER_USER_KERNEL((std::string("") + math_type_str + "_grad")) \
.SetCreateFn<MathUnaryElementwiseGradGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == DataType::kFloat16));
OF_PP_FOR_EACH_TUPLE(REGISTER_MATH_UNARY_ELEMENTWISE_GPU_HALF_KERNEL_AND_GRAD,
MATH_UNARY_ELEMENTWISE_FUNC_SEQ)
} // namespace oneflow
|
95790cb970f30d20c323f8cc3925c2aa97d380d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_op.h"
namespace caffe2 {
template <typename T>
__global__ void CosKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = cos(X[i]);
}
}
template <typename T>
__global__ void CosGradientKernel(const int N, const T* X, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = -dY[i] * sin(X[i]);
}
}
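// Chain rule: with y = cos(x), dy/dx = -sin(x), so dX[i] = dY[i] * (-sin(X[i])),
// which is exactly what the kernel above computes.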
struct CosCUDAFunctor {
template <typename T>
inline void
operator()(const int n, const T* x, T* y, CUDAContext* device_context) {
hipLaunchKernelGGL(( CosKernel<T>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
device_context->cuda_stream(), n, x, y);
return;
}
};
struct CosGradientCUDAFunctor {
template <typename T>
inline void Run(
const int n,
const T* x,
const T* dy,
T* dx,
CUDAContext* device_context) {
hipLaunchKernelGGL(( CosGradientKernel<T>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
device_context->cuda_stream(), n, x, dy, dx);
return;
}
};
REGISTER_CUDA_OPERATOR(
Cos,
UnaryElementwiseOp<TensorTypes<float>, CUDAContext, CosCUDAFunctor>);
REGISTER_CUDA_OPERATOR(
CosGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
WithoutBroadcast<CosGradientCUDAFunctor>>);
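// Cos is registered as a unary elementwise op over float tensors; CosGradient is a
// binary elementwise op whose two inputs are X and the incoming gradient dY, with
// broadcasting disabled.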
} // namespace caffe2
| 95790cb970f30d20c323f8cc3925c2aa97d380d6.cu | #include <cmath>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/elementwise_op.h"
namespace caffe2 {
template <typename T>
__global__ void CosKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = cos(X[i]);
}
}
template <typename T>
__global__ void CosGradientKernel(const int N, const T* X, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = -dY[i] * sin(X[i]);
}
}
struct CosCUDAFunctor {
template <typename T>
inline void
operator()(const int n, const T* x, T* y, CUDAContext* device_context) {
CosKernel<T>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
device_context->cuda_stream()>>>(n, x, y);
return;
}
};
struct CosGradientCUDAFunctor {
template <typename T>
inline void Run(
const int n,
const T* x,
const T* dy,
T* dx,
CUDAContext* device_context) {
CosGradientKernel<T>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
device_context->cuda_stream()>>>(n, x, dy, dx);
return;
}
};
REGISTER_CUDA_OPERATOR(
Cos,
UnaryElementwiseOp<TensorTypes<float>, CUDAContext, CosCUDAFunctor>);
REGISTER_CUDA_OPERATOR(
CosGradient,
BinaryElementwiseOp<
TensorTypes<float>,
CUDAContext,
WithoutBroadcast<CosGradientCUDAFunctor>>);
} // namespace caffe2
|
257b60f1a3d89dada1514768583aedd040adb89c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/cross_entropy.h"
namespace paddle {
namespace operators {
namespace math {
namespace {
template <typename T>
__global__ void CrossEntropyKernel(T* Y, const T* X, const int* label,
const int N, const int D) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
PADDLE_ASSERT(label[i] >= 0 && label[i] < D);
Y[i] = -math::TolerableValue<T>()(log(X[i * D + label[i]]));
}
}
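// Hard-label cross entropy: Y[i] = -log(X[i, label[i]]), one value per sample; the
// PADDLE_ASSERT guards against out-of-range class indices.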
template <typename T>
__device__ __forceinline__ T sum_single_warp(T val) {
val += __shfl_down(val, 16);
val += __shfl_down(val, 8);
val += __shfl_down(val, 4);
val += __shfl_down(val, 2);
val += __shfl_down(val, 1);
return val;
}
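// sum_single_warp reduces 32 lane-private values to a single sum using warp shuffles
// with offsets 16, 8, 4, 2 and 1; lane 0 ends up holding the warp total. (__shfl_down
// is the pre-CUDA-9 form; newer code would use __shfl_down_sync.)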
template <typename T>
__global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label,
const int class_num) {
int tid = threadIdx.x;
extern __shared__ T d_sum[];
d_sum[tid] = 0;
int cur_idx = tid;
int next_idx = blockIdx.x * class_num + tid;
while (cur_idx < class_num) {
d_sum[tid] +=
math::TolerableValue<T>()(::log(X[next_idx])) * label[next_idx];
next_idx += blockDim.x;
cur_idx += blockDim.x;
}
__syncthreads();
for (unsigned int stride = blockDim.x >> 1; stride >= 32; stride >>= 1) {
if (tid < stride) d_sum[tid] += d_sum[tid + stride];
__syncthreads();
}
T val = d_sum[tid];
val = sum_single_warp<T>(val);
if (tid == 0) Y[blockIdx.x] = -val;
}
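// Soft-label cross entropy: each block handles one sample and computes
// Y[row] = -sum_c label[row, c] * log(X[row, c]) using a shared-memory tree reduction
// down to 32 partial sums followed by the warp reduction above.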
} // namespace
using Tensor = framework::Tensor;
template <typename T>
class CrossEntropyFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& ctx, framework::Tensor* out,
const framework::Tensor* prob,
const framework::Tensor* labels, bool softLabel) {
const T* prob_data = prob->data<T>();
T* loss_data = out->mutable_data<T>(ctx.GetPlace());
int batch_size = prob->dims()[0];
int class_num = prob->dims()[1];
if (softLabel) {
const T* label_data = labels->data<T>();
int block = class_num > 512 ? 512 : pow(2, int(std::log2(class_num)));
hipLaunchKernelGGL(( SoftCrossEntropyKernel<T>),
dim3(batch_size), dim3(block), block * sizeof(T),
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(),
loss_data, prob_data, label_data, class_num);
} else {
const int* label_data = labels->data<int>();
int block = 512;
int grid = (batch_size + block - 1) / block;
hipLaunchKernelGGL(( CrossEntropyKernel<T>),
dim3(grid), dim3(block), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(),
loss_data, prob_data, label_data, batch_size, class_num);
}
}
};
template class CrossEntropyFunctor<platform::GPUPlace, float>;
} // namespace math
} // namespace operators
} // namespace paddle
| 257b60f1a3d89dada1514768583aedd040adb89c.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/cross_entropy.h"
namespace paddle {
namespace operators {
namespace math {
namespace {
template <typename T>
__global__ void CrossEntropyKernel(T* Y, const T* X, const int* label,
const int N, const int D) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
PADDLE_ASSERT(label[i] >= 0 && label[i] < D);
Y[i] = -math::TolerableValue<T>()(log(X[i * D + label[i]]));
}
}
template <typename T>
__device__ __forceinline__ T sum_single_warp(T val) {
val += __shfl_down(val, 16);
val += __shfl_down(val, 8);
val += __shfl_down(val, 4);
val += __shfl_down(val, 2);
val += __shfl_down(val, 1);
return val;
}
template <typename T>
__global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label,
const int class_num) {
int tid = threadIdx.x;
extern __shared__ T d_sum[];
d_sum[tid] = 0;
int cur_idx = tid;
int next_idx = blockIdx.x * class_num + tid;
while (cur_idx < class_num) {
d_sum[tid] +=
math::TolerableValue<T>()(std::log(X[next_idx])) * label[next_idx];
next_idx += blockDim.x;
cur_idx += blockDim.x;
}
__syncthreads();
for (unsigned int stride = blockDim.x >> 1; stride >= 32; stride >>= 1) {
if (tid < stride) d_sum[tid] += d_sum[tid + stride];
__syncthreads();
}
T val = d_sum[tid];
val = sum_single_warp<T>(val);
if (tid == 0) Y[blockIdx.x] = -val;
}
} // namespace
using Tensor = framework::Tensor;
template <typename T>
class CrossEntropyFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& ctx, framework::Tensor* out,
const framework::Tensor* prob,
const framework::Tensor* labels, bool softLabel) {
const T* prob_data = prob->data<T>();
T* loss_data = out->mutable_data<T>(ctx.GetPlace());
int batch_size = prob->dims()[0];
int class_num = prob->dims()[1];
if (softLabel) {
const T* label_data = labels->data<T>();
int block = class_num > 512 ? 512 : pow(2, int(std::log2(class_num)));
SoftCrossEntropyKernel<T><<<
batch_size, block, block * sizeof(T),
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>(
loss_data, prob_data, label_data, class_num);
} else {
const int* label_data = labels->data<int>();
int block = 512;
int grid = (batch_size + block - 1) / block;
CrossEntropyKernel<T><<<
grid, block, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>(
loss_data, prob_data, label_data, batch_size, class_num);
}
}
};
template class CrossEntropyFunctor<platform::GPUPlace, float>;
} // namespace math
} // namespace operators
} // namespace paddle
|
031dcbef6430a6e6e9aec0b516e476d42bda3c14.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include<limits>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
using namespace std;
struct NUM_ADD
{
short2 read_haplotype_number;
int address_array;
};
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
__global__ void pairHMM( int size, char * data, NUM_ADD * num_add, float * result,float * MG,float * DG, float * IG) // what is the maximum number of parameters?
{
int offset=blockIdx.x;
__shared__ short2 read_haplotype_number;
__shared__ char * read_base_array;
__shared__ float * parameter_array;
__shared__ char4 * haplotype_base_array;
__shared__ float * MGG;
__shared__ float * IGG;
__shared__ float * DGG;
if(threadIdx.x==0)
{
MGG=MG+offset*500;
DGG=IG+offset*500;
IGG=DG+offset*500;
}
while(offset<size)
{
float result_block=0;
__shared__ int round;
__shared__ int skip;
		//as each time it will deal with 2 read & haplotype pairs
		// each block deals with one pair of haplotype & read
if( threadIdx.x==0)
{
// printf("Set MGG DGG IGG\n");
read_haplotype_number=num_add[offset].read_haplotype_number;
read_base_array=(char *) (data+num_add[offset].address_array);
parameter_array=(float *) (read_base_array+(read_haplotype_number.x+127)/128*128);
skip=(sizeof(float)*read_haplotype_number.x+128-1)/128*128/sizeof(float);
haplotype_base_array=(char4 *) (parameter_array+skip*4);
// printf("%d %d %d \n", read_haplotype_number.x,read_haplotype_number.y, num_add[offset].address_array);
// printf("%p %p %p\n",read_base_array, parameter_array, haplotype_base_array);
// printf("skip=%d\n", skip);
round=(read_haplotype_number.x+blockDim.x-1)/blockDim.x;
}
__syncthreads();
__shared__ char haplotype_base_in_char[500];
int hh=(read_haplotype_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 haplotype_base_in_thread;
haplotype_base_in_thread=haplotype_base_array[aa]; //Is it right to get data from global memory
haplotype_base_in_char[aa*4]=haplotype_base_in_thread.x;
haplotype_base_in_char[aa*4+1]=haplotype_base_in_thread.y;
haplotype_base_in_char[aa*4+2]=haplotype_base_in_thread.z;
haplotype_base_in_char[aa*4+3]=haplotype_base_in_thread.w;
//printf("%c %c %c %c\n", haplotype_base_in_thread.x,haplotype_base_in_thread.y,haplotype_base_in_thread.z, haplotype_base_in_thread.w);
}
}
__syncthreads();
		__shared__ float MM[130]; //left; all the 160 should be equal to the size of the block. Should I use dynamic shared memory? The sizes of MM, DD and II should be the size of the block.
__shared__ float DD[130]; //left
__shared__ float II[130]; //left
char read_base;
float D_0=1.329228e+36/(float)read_haplotype_number.y;
int read_number=read_haplotype_number.x;
int round_size;
if(threadIdx.x==0)
{
MM[0]=0;
DD[0]=D_0;
II[0]=0;
}
for(int i=0;i<round;i++)
{
round_size=(read_number>blockDim.x)?blockDim.x: read_number;
read_number=(read_number>blockDim.x)?read_number-blockDim.x:0; // read_num is the remaining length at this round
char read_base;
float M=1.0f; //now
float Qm,Qm_1,alpha,beta,delta,epsion,xiksi;//thet;
if(threadIdx.x<round_size ) // tid is from 0 ~ round_size-1
{
read_base=read_base_array[threadIdx.x+blockDim.x*i];
delta=parameter_array[threadIdx.x+blockDim.x*i+skip];
xiksi=parameter_array[threadIdx.x+blockDim.x*i+2*skip];
alpha=parameter_array[threadIdx.x+blockDim.x*i+3*skip];
epsion=0.1;
//beta=M-epsion;
beta=0.9;
Qm=parameter_array[threadIdx.x+blockDim.x*i];
Qm_1=M-Qm;
Qm=fdividef(Qm,3.0f);
//printf("%d %e %e %e %e %e %e \n",threadIdx.x, Qm_1, Qm, alpha, beta, delta, xiksi);
}
			//why not use else break? Because we use __syncthreads(), we need to make sure that all threads can reach that point
M=0;
float I=0; //now
float D=0; //now
float MMM=0;//up left
float DDD=0;//up left
float III=0;//up left
if(threadIdx.x==0&&i==0) DDD=D_0; // Just in the first round, it need to be D_0
int current_haplotype_id=0;
for(int j=0;j<round_size+read_haplotype_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_haplotype_id<read_haplotype_number.y))
{
if(i>0&&threadIdx.x==0)
{
MM[0]=MGG[current_haplotype_id];
II[0]=IGG[current_haplotype_id];
DD[0]=DGG[current_haplotype_id];
}
float MID=__fadd_rn(III,DDD);
DDD=DD[threadIdx.x];
III=II[threadIdx.x];
float DDM=__fmul_rn(M,xiksi);
float IIMI=__fmul_rn(II[threadIdx.x],epsion);
float MIIDD=__fmul_rn(beta,MID);
char haplotype_base_each=haplotype_base_in_char[current_haplotype_id];
float aa=(haplotype_base_each==read_base)? Qm_1:Qm;
D=__fmaf_rn(D,epsion,DDM);
//D=__fmaf_rn(D,thet,DDM);
I=__fmaf_rn(MM[threadIdx.x],delta,IIMI);
float MMID=__fmaf_rn(alpha,MMM,MIIDD);
MMM=MM[threadIdx.x];
current_haplotype_id++;
M=__fmul_rn(aa,MMID);
}
				__syncthreads(); //to make sure that the former values of MM[threadIdx.x+1] have been used by the other threads before they are overwritten.
MM[threadIdx.x+1]=M;
DD[threadIdx.x+1]=D;
II[threadIdx.x+1]=I;
if(threadIdx.x==round_size-1 && i==round-1)
result_block=__fadd_rn(result_block,__fadd_rn(M,I));
if(i<round-1&&threadIdx.x==round_size-1 ) // tid is the last thread but there are more round
{
MGG[current_haplotype_id-1]=M;
IGG[current_haplotype_id-1]=I;
DGG[current_haplotype_id-1]=D;
}
				__syncthreads(); // there should be two __syncthreads(); // to make sure that all of MM[threadIdx.x+1] have got a new value before M, D and I change.
}
}
if(threadIdx.x==round_size-1)
{
result[offset]=result_block;
}
offset+=gridDim.x;
}
}
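// The kernel above fills the PairHMM forward matrices (match M, insertion I, deletion D)
// with an anti-diagonal (wavefront) sweep: each thread owns one read position of the
// current tile, the shared arrays MM/DD/II hold the previous anti-diagonal, and the
// global arrays MGG/IGG/DGG carry the boundary values between successive tiles of
// blockDim.x read bases. The per-pair likelihood is the sum of M+I along the last read
// row, accumulated into result[offset].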
struct InputData
{
int read_size;
char read_base[260];
char base_quals[260];
char ins_quals[260];
char del_quals[260];
char gcp_quals[260];
int haplotype_size;
char haplotype_base[500];
};
int main(int argc, char * argv[])
{
float * MG;
float * DG;
float * IG;
hipMalloc( (float **)& MG,sizeof(float) *240*500*3);
DG=MG+240*500;// ????
IG=DG+240*500; //?????
int INI=(log10f((std::numeric_limits<float>::max() / 16)));
hipFree(0);
int size_each_for=4000000;
//scanf("%d", &size_each_for);
struct timespec start,finish;
struct timespec start_total,finish_total;
double total_time=0;
double computation_time=0,mem_cpy_time=0,read_time=0, data_prepare=0;
FILE * file;
file=fopen("/data/04068/sren/dir_chromosome-10/b.txt","r");
// file=fopen("32_data.txt","r");
// file=fopen(argv[1], "r");
// file=fopen("a.txt","r");
int size;
fscanf(file,"%d",&size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
float ph2pr_h[128];
for(int i=0;i<128;i++)
{
ph2pr_h[i]=powf(10.f, -((float)i) / 10.f);
}
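	// ph2pr_h[q] converts a Phred-scaled quality q into an error probability,
	// p = 10^(-q/10); the table covers q = 0..127 so quality bytes can be masked
	// with &127 before lookup.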
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
data_prepare+=diff(start,finish);
int total=0;
float read_read, haplotype_haplotype;
while(!feof(file))
{
total+=size;
char useless;
useless=fgetc(file);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
InputData *inputdata=(InputData* )malloc(size*(sizeof(InputData)));
// int size_each_for=1000;
for(int i=0;i<size;i++)
{
int read_size;
fscanf(file,"%d\n",&inputdata[i].read_size);
fscanf(file,"%s ",inputdata[i].read_base);
read_size=inputdata[i].read_size;
// if(read_size>200)
// printf("read size is bigger than 200: size is %d \n", read_size);
read_read=read_size;
for(int j=0;j<read_size;j++)
{
int aa;
fscanf(file,"%d ",&aa);
inputdata[i]. base_quals[j]=(char)aa;
}
for(int j=0;j<read_size;j++)
{
int aa;
fscanf(file,"%d ",&aa);
inputdata[i].ins_quals[j]=(char)aa;
}
for(int j=0;j<read_size;j++)
{
int aa;
fscanf(file,"%d ",&aa);
inputdata[i].del_quals[j]=(char)aa;
}
for(int j=0;j<read_size;j++)
{
int aa;
if(j<read_size-1) fscanf(file,"%d ",&aa);
else fscanf(file,"%d \n",&aa);
inputdata[i].gcp_quals[j]=(char)aa;
}
fscanf(file,"%d\n",&inputdata[i].haplotype_size);
fscanf(file, "%s\n",inputdata[i].haplotype_base);
haplotype_haplotype=inputdata[i].haplotype_size;
}
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
read_time+=diff(start,finish);
float * result_h=(float *) malloc(sizeof(float)*size); //on cpu
clock_gettime(CLOCK_MONOTONIC_RAW,&start_total);
char * data_h_total;
char * result_d_total;
//printf("size=%d\n",size *700* sizeof (char)+size*200*4*sizeof(float)+size*sizeof(NUM_ADD) );
int memory_malloc_size=(size*260+127)/128*128; //read_base
memory_malloc_size+=(size*500+127)/128*128; // haplotyp_base;
memory_malloc_size+=(size*260*4+127)/128*128;//parameter1;
memory_malloc_size+=(size*260*4+127)/128*128;//parameter2;
memory_malloc_size+=(size*260*4+127)/128*128;//parameter3;
memory_malloc_size+=(size*260*4+127)/128*128;//parameter4;
memory_malloc_size+=(size*4+127)/128*128;//result;
memory_malloc_size+=(size*sizeof(NUM_ADD)+127)/128*128;//NUM_ADD;
data_h_total=(char*)malloc(memory_malloc_size); //on cpu
hipError_t err;
err=hipMalloc( (char **) &result_d_total, memory_malloc_size);
if(err!=hipSuccess)
printf( "1 Error %d: %s!\n", err, hipGetErrorString(err) );
//printf("%p %p \n", result_d_total,result_d_total+memory_malloc_size);
char * data_d_total=result_d_total+(size*sizeof(float)+127)/128*128; //on GPU
//int num_streams=(size+size_each_for-1)/size_each_for;
//hipStream_t * streams=(hipStream_t *) malloc(num_streams*sizeof(hipStream_t));
//for(int aaa=0;aaa<num_streams;aaa++)
//hipStreamCreate(&streams[aaa]);
//for(int aaa=0;aaa<num_streams;aaa++)
//{
//int size_in_each=size_each_for;
//if(aaa==num_streams-1)
// size_in_each=size-aaa*size_each_for;
//char * data_h=data_h_total+base*1500*sizeof(char)+base*sizeof(NUM_ADD);
//char * data_h_begin=data_h;
char * data_h=data_h_total; //cpu
char * data_h_begin=data_h; //cpu
NUM_ADD *data_num_add=(NUM_ADD *) (data_h); //cpu
	data_h=data_h+(size*sizeof(NUM_ADD)+127)/128*128; // it is 64*x, thus we do not need to worry about alignment.
int data_size=0;
for(int i=0;i<size;i++)
{
int read_size=inputdata[i].read_size;
int skip=(sizeof(float)*read_size+128-1)/128*128/sizeof(float);
// float * parameter=(float *) malloc(skip*sizeof(float)*4);
float parameter[1040];
for(int j=0;j<read_size;j++)
{
parameter[j]= ph2pr_h[inputdata[i].base_quals[j]&127 ]; //QM
parameter[j+skip]=ph2pr_h[inputdata[i].ins_quals[j]&127]; //Qi
parameter[j+skip*2]=ph2pr_h[inputdata[i].del_quals[j]&127]; //QD
parameter[j+skip*3]=1.0f-ph2pr_h[((int)(inputdata[i].ins_quals[j]&127)+(int)(inputdata[i].del_quals[j]&127))&127]; //alpha
// printf("%e %e %e %e\n", parameter[j],parameter[j+read_size], parameter[j+read_size*2],parameter[j+read_size*3]);
}
char read_base_new[260];
for(int j=0;j<read_size;j++)
{
read_base_new[j]=inputdata[i].read_base[j];
}
int haplotype_new_size=(inputdata[i].haplotype_size+4-1)/4;
		char4 haplotype_base_new[150];
for(int j=0;j<haplotype_new_size;j++)
{
haplotype_base_new[j].x=inputdata[i].haplotype_base[j*4];
if(j*4+1<inputdata[i].haplotype_size)
haplotype_base_new[j].y=inputdata[i].haplotype_base[j*4+1];
if(j*4+2<inputdata[i].haplotype_size)
haplotype_base_new[j].z=inputdata[i].haplotype_base[j*4+2];
if(j*4+3<inputdata[i].haplotype_size)
haplotype_base_new[j].w=inputdata[i].haplotype_base[j*4+3];
}
data_num_add[i].read_haplotype_number.x=inputdata[i].read_size;
data_num_add[i].read_haplotype_number.y=inputdata[i].haplotype_size;
data_num_add[i].address_array=data_size;
//read base
memcpy(data_h,read_base_new,sizeof(char)*read_size);
data_h+=(read_size+128-1)/128*128;
data_size+=(read_size+128-1)/128*128;
//printf("data_size=%d\n", data_size);
//Parameter
memcpy(data_h,parameter,sizeof(float) *skip*4);
data_h+=sizeof(float) *skip*4;
data_size+=sizeof(float) *skip*4;
//printf("data_size=%d\n", data_size);
//haplotype
memcpy(data_h,haplotype_base_new,sizeof(char4)* haplotype_new_size);
data_h+=(haplotype_new_size*sizeof(char4)+128-1)/128*128;
data_size+=(haplotype_new_size*sizeof(char4)+128-1)/128*128;
//printf("data_size=%d\n", data_size);
}
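	// Buffer layout per pair: read bases (padded to a 128-byte boundary), then four
	// float parameter arrays of length `skip` each (QM, Qi, QD, alpha), then the
	// haplotype bases repacked as char4 (again padded); NUM_ADD records the read and
	// haplotype lengths plus this pair's byte offset into the packed buffer.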
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
char * data_d=data_d_total;
float * result_d=(float *) (result_d_total);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d);
data_d=data_d+(sizeof(NUM_ADD)*size+127)/128*128;
//printf("size_to_copy=%d\n", data_size_to_copy);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
// hipMemcpyAsync(data_d,data_h_begin,data_size_to_copy,hipMemcpyHostToDevice,streams[aaa]);
err=hipMemcpy(data_d_total,data_h_begin,data_size_to_copy,hipMemcpyHostToDevice);
if(err!=hipSuccess)
printf( "2 Error %d: %s!\n", err, hipGetErrorString(err) );
// call kernel
int blocksize=128;
int gridsize=240;//240;
hipLaunchKernelGGL(( pairHMM), dim3(gridsize),dim3(blocksize), 0, 0, size,data_d,num_add_d,result_d,MG,DG,IG);
//hipDeviceSynchronize();
hipMemcpy(result_h,result_d_total,size*sizeof(float),hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
for(int i=0;i<size;i++)
float aa=(log10f((double)result_h[i]) - INI);
// printf(" i=%d %e\n",i, result_h[i]);
//
free(data_h_total);
err=hipFree(result_d_total);
if(err!=hipSuccess)
printf( "3 Error %d: %s!\n", err, hipGetErrorString(err) );
// }
clock_gettime(CLOCK_MONOTONIC_RAW,&finish_total);
total_time+=diff(start_total,finish_total);
free(inputdata);
free(result_h);
fscanf(file,"%d",&size);
// printf("%d\n",size);
// if(total>600)
// break;
}//end of while
// clock_gettime(CLOCK_MONOTONIC_RAW,&start);
hipFree(MG);
hipDeviceReset();
// clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
// mem_cpy_time+=diff(start,finish);//(finish1.tv_nsec-start1.tv_nsec)/1000000000.0;
// printf("size %d\n",total);
printf("read_time=%e initial_time=%e computation_time= %e total_time=%e\n",read_time, data_prepare,computation_time, computation_time+mem_cpy_time);
// printf("%d %d %d %e\n", fakesize, read_read, haplotype_haplotype,computation_time);
// printf("GCUPS: %lf \n", fakesize*read_read*haplotype_haplotype/computation_time/1000000000);
printf("Total time=%e\n",total_time);
return 0;
}
| 031dcbef6430a6e6e9aec0b516e476d42bda3c14.cu | #include <iostream>
#include<limits>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <cuda.h>
#include <stdint.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>
using namespace std;
struct NUM_ADD
{
short2 read_haplotype_number;
int address_array;
};
double diff(timespec start, timespec end)
{
double a=0;
if((end.tv_nsec-start.tv_nsec)<0)
{
a=end.tv_sec-start.tv_sec-1;
a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0;
}
else
{
a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0;
}
return a;
}
__global__ void pairHMM( int size, char * data, NUM_ADD * num_add, float * result,float * MG,float * DG, float * IG) // what is the maximum number of parameters?
{
int offset=blockIdx.x;
__shared__ short2 read_haplotype_number;
__shared__ char * read_base_array;
__shared__ float * parameter_array;
__shared__ char4 * haplotype_base_array;
__shared__ float * MGG;
__shared__ float * IGG;
__shared__ float * DGG;
if(threadIdx.x==0)
{
MGG=MG+offset*500;
DGG=IG+offset*500;
IGG=DG+offset*500;
}
while(offset<size)
{
float result_block=0;
__shared__ int round;
__shared__ int skip;
		//as each time it will deal with 2 read & haplotype pairs
		// each block deals with one pair of haplotype & read
if( threadIdx.x==0)
{
// printf("Set MGG DGG IGG\n");
read_haplotype_number=num_add[offset].read_haplotype_number;
read_base_array=(char *) (data+num_add[offset].address_array);
parameter_array=(float *) (read_base_array+(read_haplotype_number.x+127)/128*128);
skip=(sizeof(float)*read_haplotype_number.x+128-1)/128*128/sizeof(float);
haplotype_base_array=(char4 *) (parameter_array+skip*4);
// printf("%d %d %d \n", read_haplotype_number.x,read_haplotype_number.y, num_add[offset].address_array);
// printf("%p %p %p\n",read_base_array, parameter_array, haplotype_base_array);
// printf("skip=%d\n", skip);
round=(read_haplotype_number.x+blockDim.x-1)/blockDim.x;
}
__syncthreads();
__shared__ char haplotype_base_in_char[500];
int hh=(read_haplotype_number.y+4-1)/4;
int tt=(hh+blockDim.x-1)/blockDim.x;
for(int ii=0;ii<tt;ii++)
{
int aa=threadIdx.x+ii*blockDim.x;
if(aa< hh)
{
char4 haplotype_base_in_thread;
haplotype_base_in_thread=haplotype_base_array[aa]; //Is it right to get data from global memory
haplotype_base_in_char[aa*4]=haplotype_base_in_thread.x;
haplotype_base_in_char[aa*4+1]=haplotype_base_in_thread.y;
haplotype_base_in_char[aa*4+2]=haplotype_base_in_thread.z;
haplotype_base_in_char[aa*4+3]=haplotype_base_in_thread.w;
//printf("%c %c %c %c\n", haplotype_base_in_thread.x,haplotype_base_in_thread.y,haplotype_base_in_thread.z, haplotype_base_in_thread.w);
}
}
__syncthreads();
		__shared__ float MM[130]; //left; all the 160 should be equal to the size of the block. Should I use dynamic shared memory? The sizes of MM, DD and II should be the size of the block.
__shared__ float DD[130]; //left
__shared__ float II[130]; //left
char read_base;
float D_0=1.329228e+36/(float)read_haplotype_number.y;
int read_number=read_haplotype_number.x;
int round_size;
if(threadIdx.x==0)
{
MM[0]=0;
DD[0]=D_0;
II[0]=0;
}
for(int i=0;i<round;i++)
{
round_size=(read_number>blockDim.x)?blockDim.x: read_number;
read_number=(read_number>blockDim.x)?read_number-blockDim.x:0; // read_num is the remaining length at this round
char read_base;
float M=1.0f; //now
float Qm,Qm_1,alpha,beta,delta,epsion,xiksi;//thet;
if(threadIdx.x<round_size ) // tid is from 0 ~ round_size-1
{
read_base=read_base_array[threadIdx.x+blockDim.x*i];
delta=parameter_array[threadIdx.x+blockDim.x*i+skip];
xiksi=parameter_array[threadIdx.x+blockDim.x*i+2*skip];
alpha=parameter_array[threadIdx.x+blockDim.x*i+3*skip];
epsion=0.1;
//beta=M-epsion;
beta=0.9;
Qm=parameter_array[threadIdx.x+blockDim.x*i];
Qm_1=M-Qm;
Qm=fdividef(Qm,3.0f);
//printf("%d %e %e %e %e %e %e \n",threadIdx.x, Qm_1, Qm, alpha, beta, delta, xiksi);
}
			//why not use else break? Because we use __syncthreads(), we need to make sure that all threads can reach that point
M=0;
float I=0; //now
float D=0; //now
float MMM=0;//up left
float DDD=0;//up left
float III=0;//up left
if(threadIdx.x==0&&i==0) DDD=D_0; // Just in the first round, it need to be D_0
int current_haplotype_id=0;
for(int j=0;j<round_size+read_haplotype_number.y-1;j++)
{
int aa=j-threadIdx.x;
if( aa>=0 && (current_haplotype_id<read_haplotype_number.y))
{
if(i>0&&threadIdx.x==0)
{
MM[0]=MGG[current_haplotype_id];
II[0]=IGG[current_haplotype_id];
DD[0]=DGG[current_haplotype_id];
}
float MID=__fadd_rn(III,DDD);
DDD=DD[threadIdx.x];
III=II[threadIdx.x];
float DDM=__fmul_rn(M,xiksi);
float IIMI=__fmul_rn(II[threadIdx.x],epsion);
float MIIDD=__fmul_rn(beta,MID);
char haplotype_base_each=haplotype_base_in_char[current_haplotype_id];
float aa=(haplotype_base_each==read_base)? Qm_1:Qm;
D=__fmaf_rn(D,epsion,DDM);
//D=__fmaf_rn(D,thet,DDM);
I=__fmaf_rn(MM[threadIdx.x],delta,IIMI);
float MMID=__fmaf_rn(alpha,MMM,MIIDD);
MMM=MM[threadIdx.x];
current_haplotype_id++;
M=__fmul_rn(aa,MMID);
}
				__syncthreads(); //to make sure that the former values of MM[threadIdx.x+1] have been used by the other threads before they are overwritten.
MM[threadIdx.x+1]=M;
DD[threadIdx.x+1]=D;
II[threadIdx.x+1]=I;
if(threadIdx.x==round_size-1 && i==round-1)
result_block=__fadd_rn(result_block,__fadd_rn(M,I));
if(i<round-1&&threadIdx.x==round_size-1 ) // tid is the last thread but there are more round
{
MGG[current_haplotype_id-1]=M;
IGG[current_haplotype_id-1]=I;
DGG[current_haplotype_id-1]=D;
}
				__syncthreads(); // there should be two __syncthreads(); // to make sure that all of MM[threadIdx.x+1] have got a new value before M, D and I change.
}
}
if(threadIdx.x==round_size-1)
{
result[offset]=result_block;
}
offset+=gridDim.x;
}
}
struct InputData
{
int read_size;
char read_base[260];
char base_quals[260];
char ins_quals[260];
char del_quals[260];
char gcp_quals[260];
int haplotype_size;
char haplotype_base[500];
};
int main(int argc, char * argv[])
{
float * MG;
float * DG;
float * IG;
cudaMalloc( (float **)& MG,sizeof(float) *240*500*3);
DG=MG+240*500;// ????
IG=DG+240*500; //?????
int INI=(log10f((std::numeric_limits<float>::max() / 16)));
cudaFree(0);
int size_each_for=4000000;
//scanf("%d", &size_each_for);
struct timespec start,finish;
struct timespec start_total,finish_total;
double total_time=0;
double computation_time=0,mem_cpy_time=0,read_time=0, data_prepare=0;
FILE * file;
file=fopen("/data/04068/sren/dir_chromosome-10/b.txt","r");
// file=fopen("32_data.txt","r");
// file=fopen(argv[1], "r");
// file=fopen("a.txt","r");
int size;
fscanf(file,"%d",&size);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
float ph2pr_h[128];
for(int i=0;i<128;i++)
{
ph2pr_h[i]=powf(10.f, -((float)i) / 10.f);
}
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
data_prepare+=diff(start,finish);
int total=0;
float read_read, haplotype_haplotype;
while(!feof(file))
{
total+=size;
char useless;
useless=fgetc(file);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
InputData *inputdata=(InputData* )malloc(size*(sizeof(InputData)));
// int size_each_for=1000;
for(int i=0;i<size;i++)
{
int read_size;
fscanf(file,"%d\n",&inputdata[i].read_size);
fscanf(file,"%s ",inputdata[i].read_base);
read_size=inputdata[i].read_size;
// if(read_size>200)
// printf("read size is bigger than 200: size is %d \n", read_size);
read_read=read_size;
for(int j=0;j<read_size;j++)
{
int aa;
fscanf(file,"%d ",&aa);
inputdata[i]. base_quals[j]=(char)aa;
}
for(int j=0;j<read_size;j++)
{
int aa;
fscanf(file,"%d ",&aa);
inputdata[i].ins_quals[j]=(char)aa;
}
for(int j=0;j<read_size;j++)
{
int aa;
fscanf(file,"%d ",&aa);
inputdata[i].del_quals[j]=(char)aa;
}
for(int j=0;j<read_size;j++)
{
int aa;
if(j<read_size-1) fscanf(file,"%d ",&aa);
else fscanf(file,"%d \n",&aa);
inputdata[i].gcp_quals[j]=(char)aa;
}
fscanf(file,"%d\n",&inputdata[i].haplotype_size);
fscanf(file, "%s\n",inputdata[i].haplotype_base);
haplotype_haplotype=inputdata[i].haplotype_size;
}
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
read_time+=diff(start,finish);
float * result_h=(float *) malloc(sizeof(float)*size); //on cpu
clock_gettime(CLOCK_MONOTONIC_RAW,&start_total);
char * data_h_total;
char * result_d_total;
//printf("size=%d\n",size *700* sizeof (char)+size*200*4*sizeof(float)+size*sizeof(NUM_ADD) );
int memory_malloc_size=(size*260+127)/128*128; //read_base
memory_malloc_size+=(size*500+127)/128*128; // haplotyp_base;
memory_malloc_size+=(size*260*4+127)/128*128;//parameter1;
memory_malloc_size+=(size*260*4+127)/128*128;//parameter2;
memory_malloc_size+=(size*260*4+127)/128*128;//parameter3;
memory_malloc_size+=(size*260*4+127)/128*128;//parameter4;
memory_malloc_size+=(size*4+127)/128*128;//result;
memory_malloc_size+=(size*sizeof(NUM_ADD)+127)/128*128;//NUM_ADD;
data_h_total=(char*)malloc(memory_malloc_size); //on cpu
cudaError err;
err=cudaMalloc( (char **) &result_d_total, memory_malloc_size);
if(err!=cudaSuccess)
printf( "1 Error %d: %s!\n", err, cudaGetErrorString(err) );
//printf("%p %p \n", result_d_total,result_d_total+memory_malloc_size);
char * data_d_total=result_d_total+(size*sizeof(float)+127)/128*128; //on GPU
//int num_streams=(size+size_each_for-1)/size_each_for;
//cudaStream_t * streams=(cudaStream_t *) malloc(num_streams*sizeof(cudaStream_t));
//for(int aaa=0;aaa<num_streams;aaa++)
//cudaStreamCreate(&streams[aaa]);
//for(int aaa=0;aaa<num_streams;aaa++)
//{
//int size_in_each=size_each_for;
//if(aaa==num_streams-1)
// size_in_each=size-aaa*size_each_for;
//char * data_h=data_h_total+base*1500*sizeof(char)+base*sizeof(NUM_ADD);
//char * data_h_begin=data_h;
char * data_h=data_h_total; //cpu
char * data_h_begin=data_h; //cpu
NUM_ADD *data_num_add=(NUM_ADD *) (data_h); //cpu
	data_h=data_h+(size*sizeof(NUM_ADD)+127)/128*128; // it is 64*x, thus we do not need to worry about alignment.
int data_size=0;
for(int i=0;i<size;i++)
{
int read_size=inputdata[i].read_size;
int skip=(sizeof(float)*read_size+128-1)/128*128/sizeof(float);
// float * parameter=(float *) malloc(skip*sizeof(float)*4);
float parameter[1040];
for(int j=0;j<read_size;j++)
{
parameter[j]= ph2pr_h[inputdata[i].base_quals[j]&127 ]; //QM
parameter[j+skip]=ph2pr_h[inputdata[i].ins_quals[j]&127]; //Qi
parameter[j+skip*2]=ph2pr_h[inputdata[i].del_quals[j]&127]; //QD
parameter[j+skip*3]=1.0f-ph2pr_h[((int)(inputdata[i].ins_quals[j]&127)+(int)(inputdata[i].del_quals[j]&127))&127]; //alpha
// printf("%e %e %e %e\n", parameter[j],parameter[j+read_size], parameter[j+read_size*2],parameter[j+read_size*3]);
}
char read_base_new[260];
for(int j=0;j<read_size;j++)
{
read_base_new[j]=inputdata[i].read_base[j];
}
int haplotype_new_size=(inputdata[i].haplotype_size+4-1)/4;
		char4 haplotype_base_new[150];
for(int j=0;j<haplotype_new_size;j++)
{
haplotype_base_new[j].x=inputdata[i].haplotype_base[j*4];
if(j*4+1<inputdata[i].haplotype_size)
haplotype_base_new[j].y=inputdata[i].haplotype_base[j*4+1];
if(j*4+2<inputdata[i].haplotype_size)
haplotype_base_new[j].z=inputdata[i].haplotype_base[j*4+2];
if(j*4+3<inputdata[i].haplotype_size)
haplotype_base_new[j].w=inputdata[i].haplotype_base[j*4+3];
}
data_num_add[i].read_haplotype_number.x=inputdata[i].read_size;
data_num_add[i].read_haplotype_number.y=inputdata[i].haplotype_size;
data_num_add[i].address_array=data_size;
//read base
memcpy(data_h,read_base_new,sizeof(char)*read_size);
data_h+=(read_size+128-1)/128*128;
data_size+=(read_size+128-1)/128*128;
//printf("data_size=%d\n", data_size);
//Parameter
memcpy(data_h,parameter,sizeof(float) *skip*4);
data_h+=sizeof(float) *skip*4;
data_size+=sizeof(float) *skip*4;
//printf("data_size=%d\n", data_size);
//haplotype
memcpy(data_h,haplotype_base_new,sizeof(char4)* haplotype_new_size);
data_h+=(haplotype_new_size*sizeof(char4)+128-1)/128*128;
data_size+=(haplotype_new_size*sizeof(char4)+128-1)/128*128;
//printf("data_size=%d\n", data_size);
}
int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128;
char * data_d=data_d_total;
float * result_d=(float *) (result_d_total);
NUM_ADD * num_add_d=(NUM_ADD *) (data_d);
data_d=data_d+(sizeof(NUM_ADD)*size+127)/128*128;
//printf("size_to_copy=%d\n", data_size_to_copy);
clock_gettime(CLOCK_MONOTONIC_RAW,&start);
// cudaMemcpyAsync(data_d,data_h_begin,data_size_to_copy,cudaMemcpyHostToDevice,streams[aaa]);
err=cudaMemcpy(data_d_total,data_h_begin,data_size_to_copy,cudaMemcpyHostToDevice);
if(err!=cudaSuccess)
printf( "2 Error %d: %s!\n", err, cudaGetErrorString(err) );
// call kernel
int blocksize=128;
int gridsize=240;//240;
pairHMM<<<gridsize,blocksize>>> (size,data_d,num_add_d,result_d,MG,DG,IG);
//cudaDeviceSynchronize();
cudaMemcpy(result_h,result_d_total,size*sizeof(float),cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
computation_time+=diff(start,finish);
for(int i=0;i<size;i++)
float aa=(log10f((double)result_h[i]) - INI);
// printf(" i=%d %e\n",i, result_h[i]);
//
free(data_h_total);
err=cudaFree(result_d_total);
if(err!=cudaSuccess)
printf( "3 Error %d: %s!\n", err, cudaGetErrorString(err) );
// }
clock_gettime(CLOCK_MONOTONIC_RAW,&finish_total);
total_time+=diff(start_total,finish_total);
free(inputdata);
free(result_h);
fscanf(file,"%d",&size);
// printf("%d\n",size);
// if(total>600)
// break;
}//end of while
// clock_gettime(CLOCK_MONOTONIC_RAW,&start);
cudaFree(MG);
cudaDeviceReset();
// clock_gettime(CLOCK_MONOTONIC_RAW,&finish);
// mem_cpy_time+=diff(start,finish);//(finish1.tv_nsec-start1.tv_nsec)/1000000000.0;
// printf("size %d\n",total);
printf("read_time=%e initial_time=%e computation_time= %e total_time=%e\n",read_time, data_prepare,computation_time, computation_time+mem_cpy_time);
// printf("%d %d %d %e\n", fakesize, read_read, haplotype_haplotype,computation_time);
// printf("GCUPS: %lf \n", fakesize*read_read*haplotype_haplotype/computation_time/1000000000);
printf("Total time=%e\n",total_time);
return 0;
}
|
2fb2616b93aeb15a132d98eedb2626cf623b83f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_drhouupdx_kernel;
int xdim0_drhouupdx_kernel_h = -1;
int ydim0_drhouupdx_kernel_h = -1;
__constant__ int xdim1_drhouupdx_kernel;
int xdim1_drhouupdx_kernel_h = -1;
int ydim1_drhouupdx_kernel_h = -1;
__constant__ int xdim2_drhouupdx_kernel;
int xdim2_drhouupdx_kernel_h = -1;
int ydim2_drhouupdx_kernel_h = -1;
__constant__ int xdim3_drhouupdx_kernel;
int xdim3_drhouupdx_kernel_h = -1;
int ydim3_drhouupdx_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
// user function
__device__
void
drhouupdx_kernel_gpu(const double *rhou_new, const double *rho_new,
const double *rhoE_new, double *rhou_res) {
double fni =
rhou_new[OPS_ACC0(0)] * rhou_new[OPS_ACC0(0)] / rho_new[OPS_ACC1(0)];
double p = gam1 * (rhoE_new[OPS_ACC2(0)] - 0.5 * fni);
fni = fni + p;
double fnim1 =
rhou_new[OPS_ACC0(-1)] * rhou_new[OPS_ACC0(-1)] / rho_new[OPS_ACC1(-1)];
p = gam1 * (rhoE_new[OPS_ACC2(-1)] - 0.5 * fnim1);
fnim1 = fnim1 + p;
double fnim2 =
rhou_new[OPS_ACC0(-2)] * rhou_new[OPS_ACC0(-2)] / rho_new[OPS_ACC1(-2)];
p = gam1 * (rhoE_new[OPS_ACC2(-2)] - 0.5 * fnim2);
fnim2 = fnim2 + p;
double fnip1 =
rhou_new[OPS_ACC0(1)] * rhou_new[OPS_ACC0(1)] / rho_new[OPS_ACC1(1)];
p = gam1 * (rhoE_new[OPS_ACC2(1)] - 0.5 * fnip1);
fnip1 = fnip1 + p;
double fnip2 =
rhou_new[OPS_ACC0(2)] * rhou_new[OPS_ACC0(2)] / rho_new[OPS_ACC1(2)];
p = gam1 * (rhoE_new[OPS_ACC2(2)] - 0.5 * fnip2);
fnip2 = fnip2 + p;
double deriv = (fnim2 - fnip2 + 8.0 * (fnip1 - fnim1)) / (12.00 * dx);
rhou_res[OPS_ACC3(0)] = deriv;
}
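// For reference: the expression above is the standard fourth-order central
// difference for a first derivative,
//
//     df/dx ~= ( -f(x+2h) + 8*f(x+h) - 8*f(x-h) + f(x-2h) ) / (12*h),
//
// applied with h = dx to the momentum flux f = rhou*rhou/rho + p, where the
// pressure is recovered as p = gam1*(rhoE - 0.5*rho*u*u); gam1 and dx are
// assumed to be application-level OPS constants declared outside this file.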
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_drhouupdx_kernel(const double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
double *__restrict arg3, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
if (idx_x < size0) {
drhouupdx_kernel_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_drhouupdx_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 4))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(4, "drhouupdx_kernel");
OPS_kernels[4].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_drhouupdx_kernel_h || xdim1 != xdim1_drhouupdx_kernel_h ||
xdim2 != xdim2_drhouupdx_kernel_h || xdim3 != xdim3_drhouupdx_kernel_h) {
hipMemcpyToSymbol(xdim0_drhouupdx_kernel, &xdim0, sizeof(int));
xdim0_drhouupdx_kernel_h = xdim0;
hipMemcpyToSymbol(xdim1_drhouupdx_kernel, &xdim1, sizeof(int));
xdim1_drhouupdx_kernel_h = xdim1;
hipMemcpyToSymbol(xdim2_drhouupdx_kernel, &xdim2, sizeof(int));
xdim2_drhouupdx_kernel_h = xdim2;
hipMemcpyToSymbol(xdim3_drhouupdx_kernel, &xdim3, sizeof(int));
xdim3_drhouupdx_kernel_h = xdim3;
}
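// The *_h shadow variables above cache the values last copied to __constant__
// memory, so hipMemcpyToSymbol is re-issued only when a dat size actually
// changes between calls of this host stub.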
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[4].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_drhouupdx_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[4].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[3], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[4].mpi_time += t2 - t1;
OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
| 2fb2616b93aeb15a132d98eedb2626cf623b83f2.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_drhouupdx_kernel;
int xdim0_drhouupdx_kernel_h = -1;
int ydim0_drhouupdx_kernel_h = -1;
__constant__ int xdim1_drhouupdx_kernel;
int xdim1_drhouupdx_kernel_h = -1;
int ydim1_drhouupdx_kernel_h = -1;
__constant__ int xdim2_drhouupdx_kernel;
int xdim2_drhouupdx_kernel_h = -1;
int ydim2_drhouupdx_kernel_h = -1;
__constant__ int xdim3_drhouupdx_kernel;
int xdim3_drhouupdx_kernel_h = -1;
int ydim3_drhouupdx_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x) (x)
#define OPS_ACC1(x) (x)
#define OPS_ACC2(x) (x)
#define OPS_ACC3(x) (x)
// user function
__device__
void
drhouupdx_kernel_gpu(const double *rhou_new, const double *rho_new,
const double *rhoE_new, double *rhou_res) {
double fni =
rhou_new[OPS_ACC0(0)] * rhou_new[OPS_ACC0(0)] / rho_new[OPS_ACC1(0)];
double p = gam1 * (rhoE_new[OPS_ACC2(0)] - 0.5 * fni);
fni = fni + p;
double fnim1 =
rhou_new[OPS_ACC0(-1)] * rhou_new[OPS_ACC0(-1)] / rho_new[OPS_ACC1(-1)];
p = gam1 * (rhoE_new[OPS_ACC2(-1)] - 0.5 * fnim1);
fnim1 = fnim1 + p;
double fnim2 =
rhou_new[OPS_ACC0(-2)] * rhou_new[OPS_ACC0(-2)] / rho_new[OPS_ACC1(-2)];
p = gam1 * (rhoE_new[OPS_ACC2(-2)] - 0.5 * fnim2);
fnim2 = fnim2 + p;
double fnip1 =
rhou_new[OPS_ACC0(1)] * rhou_new[OPS_ACC0(1)] / rho_new[OPS_ACC1(1)];
p = gam1 * (rhoE_new[OPS_ACC2(1)] - 0.5 * fnip1);
fnip1 = fnip1 + p;
double fnip2 =
rhou_new[OPS_ACC0(2)] * rhou_new[OPS_ACC0(2)] / rho_new[OPS_ACC1(2)];
p = gam1 * (rhoE_new[OPS_ACC2(2)] - 0.5 * fnip2);
fnip2 = fnip2 + p;
double deriv = (fnim2 - fnip2 + 8.0 * (fnip1 - fnim1)) / (12.00 * dx);
rhou_res[OPS_ACC3(0)] = deriv;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_drhouupdx_kernel(const double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
double *__restrict arg3, int size0) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1;
arg1 += idx_x * 1 * 1;
arg2 += idx_x * 1 * 1;
arg3 += idx_x * 1 * 1;
if (idx_x < size0) {
drhouupdx_kernel_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_drhouupdx_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 4))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(4, "drhouupdx_kernel");
OPS_kernels[4].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[1];
int end[1];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 1; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 1; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
if (xdim0 != xdim0_drhouupdx_kernel_h || xdim1 != xdim1_drhouupdx_kernel_h ||
xdim2 != xdim2_drhouupdx_kernel_h || xdim3 != xdim3_drhouupdx_kernel_h) {
cudaMemcpyToSymbol(xdim0_drhouupdx_kernel, &xdim0, sizeof(int));
xdim0_drhouupdx_kernel_h = xdim0;
cudaMemcpyToSymbol(xdim1_drhouupdx_kernel, &xdim1, sizeof(int));
xdim1_drhouupdx_kernel_h = xdim1;
cudaMemcpyToSymbol(xdim2_drhouupdx_kernel, &xdim2, sizeof(int));
xdim2_drhouupdx_kernel_h = xdim2;
cudaMemcpyToSymbol(xdim3_drhouupdx_kernel, &xdim3, sizeof(int));
xdim3_drhouupdx_kernel_h = xdim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1);
dim3 tblock(OPS_block_size_x, 1, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[4].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_drhouupdx_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[4].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[3], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[4].mpi_time += t2 - t1;
OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[4].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
|
30205d3786549227450b3a0685a813b6387b1333.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s
*/
/*
blk_M=96 blk_N=96 blk_K=16 nthd_x=64 nthd_y=4
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define magmablas_sgemm_fermi magmablas_sgemm
texture<float,1> tex_x_float_A;
texture<float,1> tex_x_float_B;
static __inline__ __device__ float fetch_x_A(const int& i, const float * x)
{
return tex1Dfetch(tex_x_float_A, i);
}
static __inline__ __device__ float fetch_x_B(const int& i, const float * x)
{
return tex1Dfetch(tex_x_float_B, i);
}
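// A and B are read through the 1D float textures bound in magmablas_sgemm_fermi
// below; the integer passed to tex1Dfetch is an element offset into the bound
// buffer, and the raw pointer parameter of these helpers is unused.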
extern "C" __global__ void
fermiSgemm_v2_kernel_NN(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb, int ldc,
float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 96;
const int ibx = blockIdx.x * 96;
const int idt = ty * 64 + tx;
const int tx2= idt%16; // idx2
const int ty2= idt/16; // idy2
__shared__ float Bb[16][97];
__shared__ float Abs[96][17];
float xxA[6];
float xxB[6];
int trackA = offsetA + ibx + tx2 + __mul24(ty2, lda);
int trackB = offsetB + tx2 + __mul24(iby+ty2*6, ldb);
int tll = ty2;
A += trackA;
B += trackB;
// read a block of 96x16 to A and 16x96 to B
// each thread reads 6 data point, 1 point in each 16x16 subblock
#pragma unroll
for(int y=0; y<6; y++)
Abs[tx2+y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
#pragma unroll
for(int y=0; y<6; y++)
Bb[tx2][ty2*6+y] = fetch_x_B( trackB + y*ldb, B) ;
__syncthreads();
const float *Bend = B + k-16;
float Axs[6];
float Bxp[6];
float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0,
0,0,0,0,0,0, 0,0,0,0,0,0};
do
{
tll += 16;
A += lda*16;
B += 16;
trackA+=16*lda ;
trackB+=16;
// calculate part of C using the first 96x8 of A and 8x96 of B
#pragma unroll
for( int j1=0;j1<8;j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2+y*16][j1];
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0; x<6; x++)
{
#pragma unroll
for( int y=0; y<6; y++)
{
Cb[x*6 + y] += Axs[x]*Bxp[y];
}
}
}
// pre-read the next A and B
#pragma unroll
for( int y=0; y<6; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
// without going through the texture and the tll guard,
// nonzero (out-of-range) values are fetched;
// texture boundary control seems to be providing
// safety here, but officially tex1Dfetch is not supported
// by clamping or filtering (programming guide B.8.1)
#pragma unroll
for( int y=0; y<6; y++)
xxB[y] = fetch_x_B( trackB + y*ldb, B);
// calculate another part of C using the 2nd 96x8 of A and 8x96 of B
#pragma unroll
for( int j1=8;j1<16;j1++)
{
#pragma unroll
for( int y=0;y<6;y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int y=0;y<6;y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0;x<6;x++)
{
#pragma unroll
for( int y=0; y<6; y++)
{
Cb[x*6+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
// put the next A and B into position
#pragma unroll
for(int y=0;y<6;y++)
Abs[tx2+y*16][ty2] =xxA[y];
#pragma unroll
for(int y=0; y<6; y++)
Bb[tx2][ty2*6 + y] =xxB[y];
__syncthreads();
}
while (B < Bend);
// C += ty2 + ibx + __mul24 (tx2 + iby ,ldc);
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
// tail case
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0;y<6;y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int y=0;y<6;y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int x=0;x<6;x++)
#pragma unroll
for( int y=0; y<6; y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
// __syncthreads();
// C += ty2 + ibx + __mul24 (tx2 + iby ,ldc);
int gy = iby + ty2;
// writing C
#pragma unroll
for( int y=0; y<6; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<6; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*6] + beta * C[x*16];
}
C+=ldc*16;
}
}
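/* Tiling used by the four kernels in this file (cf. the header comment
   blk_M=96 blk_N=96 blk_K=16 nthd_x=64 nthd_y=4): each thread block owns a
   96x96 tile of C and sweeps K in panels of 16. The 64x4 = 256 threads are
   re-indexed as a 16x16 grid (tx2, ty2) and each thread keeps a 6x6 register
   tile Cb[36], so 16*6 = 96 rows and columns are covered. The split of the
   inner loop into two j1 halves lets the prefetch of the next A/B panel
   (xxA/xxB) overlap with computation on the current shared-memory panel. */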
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_TN(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 96;
const int ibx = blockIdx.x * 96;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ float Bb[16][97];
__shared__ float Abs[96][17];
float xxA[6];
float xxB[6];
int trackA = offsetA + tx2+ __mul24(ibx + ty2*6, lda );
int trackB = offsetB + tx2+ __mul24(iby + ty2*6, ldb );
A+= trackA;
B+= trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<6; y++)
Abs[ty2*6+y][tx2] = (tll<k)*fetch_x_A(trackA + y*lda, A);
#pragma unroll
for(int y=0; y<6; y++)
Bb[tx2][ty2*6+y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb, B ) ;
__syncthreads();
const float *Bend = B + k-16;
float Axs[6];
float Bxp[6];
float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0,
0,0,0,0,0,0, 0,0,0,0,0,0};
do
{
tll += 16;
A += 16;
B += 16;
trackA += 16;
trackB += 16;
// pre-read the next strip of A and B
#pragma unroll
for( int y=0; y<6; y++)
xxA[y] = (tll<k)*fetch_x_A(trackA + y*lda, A);
#pragma unroll
for( int y=0; y<6; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb, B);
// computing
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0;x<6; x++)
#pragma unroll
for(int y=0; y<6; y++)
Cb[x*6 + y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for(int y=0; y<6; y++)
Abs[ty2*6 + y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<6; y++)
Bb[tx2][ty2*6 + y] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24 (ty2 + iby, ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0; y<6; y++)
Cb[x*6 + y] += Axs[x]*Bxp[y];
}
int gy = iby+ty2;
#pragma unroll
for(int y=0; y<6; y++, gy+=16)
{
int gx = ibx+tx2;
#pragma unroll
for(int x=0; x<6; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*6] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_TT(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 96;
const int ibx = blockIdx.x * 96;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ float Bb[16][97];
__shared__ float Abs[96][17];
float xxA[6];
float xxB[6];
int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2;
int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb);
A += trackA;
B += trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<6; y++)
Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_x_A(trackA + lda*16*y, A);
#pragma unroll
for(int y=0; y<6; y++)
Bb[ty2][tx2+16*y] = fetch_x_B(trackB+16*y, B);
__syncthreads();
const float *Bend = B + k*ldb - 16*ldb;
float Axs[6];
float Bxp[6];
float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0,
0,0,0,0,0,0, 0,0,0,0,0,0};
do
{
tll+=16;
A += 16;
B += 16*ldb;
trackA+=16;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<6; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16, A);
#pragma unroll
for( int y=0; y<6; y++)
xxB[y] = fetch_x_B(trackB + 16*y, B);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0;y<6;y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<6; y++)
Abs[ty2 + 16*y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<6; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0; y<6; y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<6; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<6; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*6] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_NT(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 96;
const int ibx = blockIdx.x * 96;
const int idt = ty * 64 + tx;
const int tx2= idt%16;
const int ty2= idt/16;
__shared__ float Bb[16][97];
__shared__ float Abs[96][17];
float xxA[6];
float xxB[6];
int trackA = offsetA + ibx +__mul24(ty2, lda) + tx2 ;
int trackB = offsetB + iby + tx2 + __mul24(ty2, ldb);
A+= trackA;
B += trackB;
int tll = ty2;
#pragma unroll
for(int y=0; y<6; y++)
Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
#pragma unroll
for(int y=0; y<6; y++)
Bb[ty2][tx2+16*y] = /* (tll<k)* */ fetch_x_B(trackB+16*y, B);
__syncthreads();
const float *Bend = B + k*ldb - 16*ldb;
float Axs[6];
float Bxp[6];
float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0,
0,0,0,0,0,0, 0,0,0,0,0,0};
do
{
tll += 16;
A += lda *16 ;
B += 16*ldb;
trackA+=16*lda ;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<6; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
// tll same in the NN case
#pragma unroll
for( int y=0; y<6; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B( trackB + 16*y, B);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0; y<6; y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<6; y++)
Abs[tx2 + y*16][ty2] = xxA[y];
#pragma unroll
for( int y=0; y<6; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24(ty2 + iby ,ldc);
#pragma unroll
for(int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0;y<6;y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<6; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<6; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y + x*6] + beta * C[x*16];
}
C+=ldc*16;
}
}
//=================================================================================
extern "C" void
magmablas_sgemm_fermi( char TRANSA, char TRANSB, magma_int_t m , magma_int_t n , magma_int_t k ,
float alpha, const float *A, magma_int_t lda,
const float *B, magma_int_t ldb,
float beta, float *C, magma_int_t ldc )
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
SGEMM performs one of the matrix-matrix operations
C := alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X',
alpha and beta are scalars, and A, B and C are matrices, with op( A )
an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
==========
TRANSA - CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
TRANSA = 'N' or 'n', op( A ) = A.
TRANSA = 'T' or 't', op( A ) = A'.
TRANSA = 'C' or 'c', op( A ) = A'.
Unchanged on exit.
TRANSB - CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
TRANSB = 'N' or 'n', op( B ) = B.
TRANSB = 'T' or 't', op( B ) = B'.
TRANSB = 'C' or 'c', op( B ) = B'.
Unchanged on exit.
M - INTEGER.
On entry, M specifies the number of rows of the matrix
op( A ) and of the matrix C. M must be at least zero.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the number of columns of the matrix
op( B ) and the number of columns of the matrix C. N must be
at least zero.
Unchanged on exit.
K - INTEGER.
On entry, K specifies the number of columns of the matrix
op( A ) and the number of rows of the matrix op( B ). K must
be at least zero.
Unchanged on exit.
ALPHA - SINGLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - SINGLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = 'N' or 'n', and is m otherwise.
Before entry with TRANSA = 'N' or 'n', the leading m by k
part of the array A must contain the matrix A, otherwise
the leading k by m part of the array A must contain the
matrix A.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = 'N' or 'n' then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
Unchanged on exit.
B - SINGLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = 'N' or 'n', and is k otherwise.
Before entry with TRANSB = 'N' or 'n', the leading k by n
part of the array B must contain the matrix B, otherwise
the leading n by k part of the array B must contain the
matrix B.
Unchanged on exit.
LDB - INTEGER.
On entry, LDB specifies the first dimension of B as declared
in the calling (sub) program. When TRANSB = 'N' or 'n' then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
Unchanged on exit.
BETA - SINGLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then C need not be set on input.
Unchanged on exit.
C - SINGLE PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array C must
contain the matrix C, except when beta is zero, in which
case C need not be set on entry.
On exit, the array C is overwritten by the m by n matrix
( alpha*op( A )*op( B ) + beta*C ).
LDC - INTEGER.
On entry, LDC specifies the first dimension of C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
Unchanged on exit.
===================================================================== */
if (m<=0 || n<=0 || k<=0)
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 1, TransB = 1;
if (TRANSA == 'N' || TRANSA == 'n')
TransA = 0;
if (TRANSB == 'N' || TRANSB == 'n')
TransB = 0;
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
size_t CUBLAS_MAX_1DBUF_SIZE = (1 << 27) - 512;
if (sizeA>=CUBLAS_MAX_1DBUF_SIZE ||
sizeB>=CUBLAS_MAX_1DBUF_SIZE )
{
//printf("Exceeding texuture limit (CUBLAS_MAX_1DBUF_SIZE=%ld), using hipblasSgemm\n", CUBLAS_MAX_1DBUF_SIZE);
hipblasSgemm(TRANSA, TRANSB, m, n, k, alpha,
A, lda, B, ldb,
beta, C, ldc);
return;
}
hipError_t errt;
errt = hipBindTexture(&offsetA, tex_x_float_A, (int2 *)A,
sizeA * sizeof(A[0]));
if( errt != hipSuccess) printf("can not bind to texture \n");
errt = hipBindTexture(&offsetB, tex_x_float_B, (int2 *)B,
sizeB * sizeof(B[0]));
if( errt != hipSuccess) printf("can not bind to texture \n");
dim3 threads( 64, 4 );
dim3 grid(m/(96)+(m%(96)!=0),n/(96)+(n%(96)!=0));
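// i.e. one thread block per 96x96 tile of C: grid = ceil(m/96) x ceil(n/96)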
offsetA = offsetA/sizeof(A[0]);
offsetB = offsetB/sizeof(B[0]);
if ( TransB )
if( !TransA )
hipLaunchKernelGGL(( fermiSgemm_v2_kernel_NT), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
hipLaunchKernelGGL(( fermiSgemm_v2_kernel_TT), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
if( !TransA )
hipLaunchKernelGGL(( fermiSgemm_v2_kernel_NN), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
hipLaunchKernelGGL(( fermiSgemm_v2_kernel_TN), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
hipUnbindTexture ( tex_x_float_A ) ;
hipUnbindTexture ( tex_x_float_B ) ;
}
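/* Minimal usage sketch (illustrative only): dA, dB and dC are assumed to be
   column-major device buffers allocated by the caller, and magma_stream is
   assumed to have been initialized by the usual MAGMA setup.

       // C := alpha*A*B + beta*C, no transposition; lda >= m, ldb >= k, ldc >= m
       magmablas_sgemm('N', 'N', m, n, k, alpha, dA, m, dB, k, beta, dC, m);
*/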
| 30205d3786549227450b3a0685a813b6387b1333.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal s
*/
/*
blk_M=96 blk_N=96 blk_K=16 nthd_x=64 nthd_y=4
*/
#include "common_magma.h"
#include "commonblas_s.h"
#define magmablas_sgemm_fermi magmablas_sgemm
texture<float,1> tex_x_float_A;
texture<float,1> tex_x_float_B;
static __inline__ __device__ float fetch_x_A(const int& i, const float * x)
{
return tex1Dfetch(tex_x_float_A, i);
}
static __inline__ __device__ float fetch_x_B(const int& i, const float * x)
{
return tex1Dfetch(tex_x_float_B, i);
}
extern "C" __global__ void
fermiSgemm_v2_kernel_NN(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb, int ldc,
float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 96;
const int ibx = blockIdx.x * 96;
const int idt = ty * 64 + tx;
const int tx2= idt%16; // idx2
const int ty2= idt/16; // idy2
__shared__ float Bb[16][97];
__shared__ float Abs[96][17];
float xxA[6];
float xxB[6];
int trackA = offsetA + ibx + tx2 + __mul24(ty2, lda);
int trackB = offsetB + tx2 + __mul24(iby+ty2*6, ldb);
int tll = ty2;
A += trackA;
B += trackB;
// read a block of 96x16 to A and 16x96 to B
// each thread reads 6 data point, 1 point in each 16x16 subblock
#pragma unroll
for(int y=0; y<6; y++)
Abs[tx2+y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
#pragma unroll
for(int y=0; y<6; y++)
Bb[tx2][ty2*6+y] = fetch_x_B( trackB + y*ldb, B) ;
__syncthreads();
const float *Bend = B + k-16;
float Axs[6];
float Bxp[6];
float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0,
0,0,0,0,0,0, 0,0,0,0,0,0};
do
{
tll += 16;
A += lda*16;
B += 16;
trackA+=16*lda ;
trackB+=16;
// calculate part of C using the first 96x8 of A and 8x96 of B
#pragma unroll
for( int j1=0;j1<8;j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2+y*16][j1];
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0; x<6; x++)
{
#pragma unroll
for( int y=0; y<6; y++)
{
Cb[x*6 + y] += Axs[x]*Bxp[y];
}
}
}
// pre-read the next A and B
#pragma unroll
for( int y=0; y<6; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
// without going through the texture and the tll guard,
// nonzero (out-of-range) values are fetched;
// texture boundary control seems to be providing
// safety here, but officially tex1Dfetch is not supported
// by clamping or filtering (programming guide B.8.1)
#pragma unroll
for( int y=0; y<6; y++)
xxB[y] = fetch_x_B( trackB + y*ldb, B);
// calculate another part of C using the 2nd 96x8 of A and 8x96 of B
#pragma unroll
for( int j1=8;j1<16;j1++)
{
#pragma unroll
for( int y=0;y<6;y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int y=0;y<6;y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int x=0;x<6;x++)
{
#pragma unroll
for( int y=0; y<6; y++)
{
Cb[x*6+y] += Axs[x]*Bxp[y];
}
}
}
__syncthreads();
// put the next A and B into position
#pragma unroll
for(int y=0;y<6;y++)
Abs[tx2+y*16][ty2] =xxA[y];
#pragma unroll
for(int y=0; y<6; y++)
Bb[tx2][ty2*6 + y] =xxB[y];
__syncthreads();
}
while (B < Bend);
// C += ty2 + ibx + __mul24 (tx2 + iby ,ldc);
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
// tail case
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0;y<6;y++)
Bxp[y]= Bb[j1][ty2+y*16];
#pragma unroll
for( int y=0;y<6;y++)
Axs[y] = Abs[tx2+y*16][j1] ;
#pragma unroll
for( int x=0;x<6;x++)
#pragma unroll
for( int y=0; y<6; y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
// __syncthreads();
// C += ty2 + ibx + __mul24 (tx2 + iby ,ldc);
int gy = iby + ty2;
// writing C
#pragma unroll
for( int y=0; y<6; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<6; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*6] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_TN(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 96;
const int ibx = blockIdx.x * 96;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ float Bb[16][97];
__shared__ float Abs[96][17];
float xxA[6];
float xxB[6];
int trackA = offsetA + tx2+ __mul24(ibx + ty2*6, lda );
int trackB = offsetB + tx2+ __mul24(iby + ty2*6, ldb );
A+= trackA;
B+= trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<6; y++)
Abs[ty2*6+y][tx2] = (tll<k)*fetch_x_A(trackA + y*lda, A);
#pragma unroll
for(int y=0; y<6; y++)
Bb[tx2][ty2*6+y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb, B ) ;
__syncthreads();
const float *Bend = B + k-16;
float Axs[6];
float Bxp[6];
float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0,
0,0,0,0,0,0, 0,0,0,0,0,0};
do
{
tll += 16;
A += 16;
B += 16;
trackA += 16;
trackB += 16;
// pre-read the next strip of A and B
#pragma unroll
for( int y=0; y<6; y++)
xxA[y] = (tll<k)*fetch_x_A(trackA + y*lda, A);
#pragma unroll
for( int y=0; y<6; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B(trackB + y*ldb, B);
// computing
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0;x<6; x++)
#pragma unroll
for(int y=0; y<6; y++)
Cb[x*6 + y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for(int y=0; y<6; y++)
Abs[ty2*6 + y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<6; y++)
Bb[tx2][ty2*6 + y] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24 (ty2 + iby, ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0; y<6; y++)
Cb[x*6 + y] += Axs[x]*Bxp[y];
}
int gy = iby+ty2;
#pragma unroll
for(int y=0; y<6; y++, gy+=16)
{
int gx = ibx+tx2;
#pragma unroll
for(int x=0; x<6; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*6] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_TT(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 96;
const int ibx = blockIdx.x * 96;
const int idt = ty * 64 + tx;
const int tx2 = idt%16;
const int ty2 = idt/16;
__shared__ float Bb[16][97];
__shared__ float Abs[96][17];
float xxA[6];
float xxB[6];
int trackA = offsetA + __mul24( ibx + ty2, lda) + tx2;
int trackB = offsetB + iby+ tx2 + __mul24(ty2, ldb);
A += trackA;
B += trackB;
int tll = tx2;
#pragma unroll
for(int y=0; y<6; y++)
Abs[ty2+16*y][tx2] = /* (tll<k)* */ fetch_x_A(trackA + lda*16*y, A);
#pragma unroll
for(int y=0; y<6; y++)
Bb[ty2][tx2+16*y] = fetch_x_B(trackB+16*y, B);
__syncthreads();
const float *Bend = B + k*ldb - 16*ldb;
float Axs[6];
float Bxp[6];
float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0,
0,0,0,0,0,0, 0,0,0,0,0,0};
do
{
tll+=16;
A += 16;
B += 16*ldb;
trackA+=16;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<6; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + lda*y*16, A);
#pragma unroll
for( int y=0; y<6; y++)
xxB[y] = fetch_x_B(trackB + 16*y, B);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0;y<6;y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<6; y++)
Abs[ty2 + 16*y][tx2] = xxA[y];
#pragma unroll
for( int y=0; y<6; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24 (ty2 + iby ,ldc);
#pragma unroll
for( int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1];
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0; y<6; y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<6; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<6; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y+x*6] + beta * C[x*16];
}
C+=ldc*16;
}
}
//========================================================================
extern "C" __global__ void
fermiSgemm_v2_kernel_NT(float *C, const float *A, const float *B,
int m, int n, int k, int lda, int ldb,
int ldc, float alpha, float beta,
int offsetA, int offsetB)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int iby = blockIdx.y * 96;
const int ibx = blockIdx.x * 96;
const int idt = ty * 64 + tx;
const int tx2= idt%16;
const int ty2= idt/16;
__shared__ float Bb[16][97];
__shared__ float Abs[96][17];
float xxA[6];
float xxB[6];
int trackA = offsetA + ibx +__mul24(ty2, lda) + tx2 ;
int trackB = offsetB + iby + tx2 + __mul24(ty2, ldb);
A+= trackA;
B += trackB;
int tll = ty2;
#pragma unroll
for(int y=0; y<6; y++)
Abs[tx2+ y*16][ty2] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
#pragma unroll
for(int y=0; y<6; y++)
Bb[ty2][tx2+16*y] = /* (tll<k)* */ fetch_x_B(trackB+16*y, B);
__syncthreads();
const float *Bend = B + k*ldb - 16*ldb;
float Axs[6];
float Bxp[6];
float Cb[36] = {0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0, 0,0,0,0,0,0,
0,0,0,0,0,0, 0,0,0,0,0,0};
do
{
tll += 16;
A += lda *16 ;
B += 16*ldb;
trackA+=16*lda ;
trackB+=16*ldb;
#pragma unroll
for( int y=0; y<6; y++)
xxA[y] = /* (tll<k)* */ fetch_x_A(trackA + y*16, A);
// tll same in the NN case
#pragma unroll
for( int y=0; y<6; y++)
xxB[y] = /* (tll<k)* */ fetch_x_B( trackB + 16*y, B);
#pragma unroll
for( int j1=0;j1<16;j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y]= Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0; y<6; y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
__syncthreads();
#pragma unroll
for( int y=0; y<6; y++)
Abs[tx2 + y*16][ty2] = xxA[y];
#pragma unroll
for( int y=0; y<6; y++)
Bb[ty2][tx2+y*16] = xxB[y];
__syncthreads();
}
while (B < Bend);
C += tx2 + ibx + __mul24(ty2 + iby ,ldc);
#pragma unroll
for(int j1=0; j1<16; j1++)
{
#pragma unroll
for( int y=0; y<6; y++)
Bxp[y] = Bb[j1][ty2 + y*16];
#pragma unroll
for( int y=0; y<6; y++)
Axs[y] = Abs[tx2 + y*16][j1] ;
#pragma unroll
for( int x=0; x<6; x++)
#pragma unroll
for( int y=0;y<6;y++)
Cb[x*6+y] += Axs[x]*Bxp[y];
}
int gy = iby + ty2;
#pragma unroll
for( int y=0; y<6; y++, gy+=16)
{
int gx = ibx + tx2;
#pragma unroll
for(int x=0; x<6; x++, gx+=16)
{
if (gx < m && gy < n)
C[x*16] = alpha*Cb[y + x*6] + beta * C[x*16];
}
C+=ldc*16;
}
}
//=================================================================================
extern "C" void
magmablas_sgemm_fermi( char TRANSA, char TRANSB, magma_int_t m , magma_int_t n , magma_int_t k ,
float alpha, const float *A, magma_int_t lda,
const float *B, magma_int_t ldb,
float beta, float *C, magma_int_t ldc )
{
/* -- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
Purpose
=======
SGEMM performs one of the matrix-matrix operations
C := alpha*op( A )*op( B ) + beta*C,
where op( X ) is one of
op( X ) = X or op( X ) = X',
alpha and beta are scalars, and A, B and C are matrices, with op( A )
an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
Parameters
==========
TRANSA - CHARACTER*1.
On entry, TRANSA specifies the form of op( A ) to be used in
the matrix multiplication as follows:
TRANSA = 'N' or 'n', op( A ) = A.
TRANSA = 'T' or 't', op( A ) = A'.
TRANSA = 'C' or 'c', op( A ) = A'.
Unchanged on exit.
TRANSB - CHARACTER*1.
On entry, TRANSB specifies the form of op( B ) to be used in
the matrix multiplication as follows:
TRANSB = 'N' or 'n', op( B ) = B.
TRANSB = 'T' or 't', op( B ) = B'.
TRANSB = 'C' or 'c', op( B ) = B'.
Unchanged on exit.
M - INTEGER.
On entry, M specifies the number of rows of the matrix
op( A ) and of the matrix C. M must be at least zero.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the number of columns of the matrix
op( B ) and the number of columns of the matrix C. N must be
at least zero.
Unchanged on exit.
K - INTEGER.
On entry, K specifies the number of columns of the matrix
op( A ) and the number of rows of the matrix op( B ). K must
be at least zero.
Unchanged on exit.
ALPHA - SINGLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - SINGLE PRECISION array of DIMENSION ( LDA, ka ), where ka is
k when TRANSA = 'N' or 'n', and is m otherwise.
Before entry with TRANSA = 'N' or 'n', the leading m by k
part of the array A must contain the matrix A, otherwise
the leading k by m part of the array A must contain the
matrix A.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. When TRANSA = 'N' or 'n' then
LDA must be at least max( 1, m ), otherwise LDA must be at
least max( 1, k ).
Unchanged on exit.
B - SINGLE PRECISION array of DIMENSION ( LDB, kb ), where kb is
n when TRANSB = 'N' or 'n', and is k otherwise.
Before entry with TRANSB = 'N' or 'n', the leading k by n
part of the array B must contain the matrix B, otherwise
the leading n by k part of the array B must contain the
matrix B.
Unchanged on exit.
LDB - INTEGER.
On entry, LDB specifies the first dimension of B as declared
in the calling (sub) program. When TRANSB = 'N' or 'n' then
LDB must be at least max( 1, k ), otherwise LDB must be at
least max( 1, n ).
Unchanged on exit.
BETA - SINGLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then C need not be set on input.
Unchanged on exit.
C - SINGLE PRECISION array of DIMENSION ( LDC, n ).
Before entry, the leading m by n part of the array C must
contain the matrix C, except when beta is zero, in which
case C need not be set on entry.
On exit, the array C is overwritten by the m by n matrix
( alpha*op( A )*op( B ) + beta*C ).
LDC - INTEGER.
On entry, LDC specifies the first dimension of C as declared
in the calling (sub) program. LDC must be at least
max( 1, m ).
Unchanged on exit.
===================================================================== */
if (m<=0 || n<=0 || k<=0)
return;
size_t offsetA = 0;
size_t offsetB = 0;
int TransA = 1, TransB = 1;
if (TRANSA == 'N' || TRANSA == 'n')
TransA = 0;
if (TRANSB == 'N' || TRANSB == 'n')
TransB = 0;
size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m);
size_t sizeB = (size_t) ldb * (size_t) (!TransB ? n : k);
size_t CUBLAS_MAX_1DBUF_SIZE = (1 << 27) - 512;
if (sizeA>=CUBLAS_MAX_1DBUF_SIZE ||
sizeB>=CUBLAS_MAX_1DBUF_SIZE )
{
//printf("Exceeding texuture limit (CUBLAS_MAX_1DBUF_SIZE=%ld), using cublasSgemm\n", CUBLAS_MAX_1DBUF_SIZE);
cublasSgemm(TRANSA, TRANSB, m, n, k, alpha,
A, lda, B, ldb,
beta, C, ldc);
return;
}
cudaError_t errt;
errt = cudaBindTexture(&offsetA, tex_x_float_A, (int2 *)A,
sizeA * sizeof(A[0]));
if( errt != cudaSuccess) printf("can not bind to texture \n");
errt = cudaBindTexture(&offsetB, tex_x_float_B, (int2 *)B,
sizeB * sizeof(B[0]));
if( errt != cudaSuccess) printf("can not bind to texture \n");
dim3 threads( 64, 4 );
dim3 grid(m/(96)+(m%(96)!=0),n/(96)+(n%(96)!=0));
offsetA = offsetA/sizeof(A[0]);
offsetB = offsetB/sizeof(B[0]);
if ( TransB )
if( !TransA )
fermiSgemm_v2_kernel_NT<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
fermiSgemm_v2_kernel_TT<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
if( !TransA )
fermiSgemm_v2_kernel_NN<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
else
fermiSgemm_v2_kernel_TN<<< grid, threads, 0, magma_stream >>>(C, A, B, m, n, k, lda, ldb,
ldc, alpha, beta,
(int)offsetA, (int)offsetB);
cudaUnbindTexture ( tex_x_float_A ) ;
cudaUnbindTexture ( tex_x_float_B ) ;
}
|
4cd129920bce2cbe42decc91680b23e1e5479da8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/count.h> // thrust::count (ExtractHead)
#include <thrust/sequence.h> // thrust::sequence (ExtractHead)
#include <thrust/copy.h> // thrust::copy_if (ExtractHead)
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
__global__ void buildSegTree(int shifted_pivot, int *segment_tree, const char *text=NULL, int text_size=0){
long long int idx = blockIdx.x*blockDim.x + threadIdx.x;
long long int tree_idx = shifted_pivot + idx;
//leaf
if(text){
int leaf_val = 0;
if(idx < text_size){
//note: '\n' must be a single-quoted character literal, not a double-quoted string
if(text[idx] != '\n'){
leaf_val = 1;
}
}
segment_tree[tree_idx] = leaf_val;
//not leaf
}else{
long long int left_tree_node = 2*tree_idx;
long long int right_tree_node = left_tree_node+1;
if(segment_tree[left_tree_node] == 0 || segment_tree[right_tree_node] == 0){
segment_tree[tree_idx] = 0;
}else{
segment_tree[tree_idx] = segment_tree[left_tree_node] + segment_tree[right_tree_node];
}
}
return;
}
__host__ int SegmentTreeSize(int text_size){
int s = 1;
for(;s<text_size;s<<=1);
return s<<1;
}
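// SegmentTreeSize returns twice the smallest power of two >= text_size, i.e. the
// node count of a complete, 1-indexed segment tree whose leaves start at size/2.
// Example: text_size = 5 -> s grows 1,2,4,8 -> returns 16 (leaves at indices 8..15).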
//count global
__global__ void d_countPosition(int *pos, int *segment_tree, int text_size, int seg_tree_size){
long int idx = blockIdx.x*blockDim.x + threadIdx.x;
//out of bound
if(idx >= text_size){return;}
long int leaf_shifted = seg_tree_size>>1;
//zero condition
if(segment_tree[leaf_shifted+idx] == 0){
pos[idx] = 0;
return;
}else{
//naive n*k
// int word_posi = 1;
// long long int countdown_pivot = idx - 1;
// while(countdown_pivot >=0 && segment_tree[leaf_shifted+countdown_pivot] != 0){
// word_posi += 1;
// countdown_pivot -= 1;
// }
// pos[idx] = word_posi;
//segment tree approach n*(log k)
//check node is even or odd
//even start node should move to prev odd
int length = 1;
int backtrace_id = idx;
if(backtrace_id %2!= 0){
backtrace_id -= 1;
if(segment_tree[leaf_shifted + backtrace_id] == 0){
pos[idx] = length;
return;
}else{
length += 1;
}
}
//start up trace
int max_up_trace = 512;
int loop_iv = 2;
long int check_idx = (leaf_shifted + backtrace_id)/2;
leaf_shifted >>= 1;
do{
if(check_idx % 2!= 0){
if( segment_tree[check_idx -1]>=loop_iv){
length += loop_iv;
}else{
break;
}
}else if(check_idx %2 == 0 && check_idx == leaf_shifted){
break;
}
check_idx >>= 1;
loop_iv <<= 1;
leaf_shifted >>= 1;
}while(loop_iv <= max_up_trace);
//down trace if check_idx = 0
if(segment_tree[check_idx/2] == 0 && !(check_idx == leaf_shifted && segment_tree[check_idx] > 0)){
//move down one sibling
check_idx -= 1;
//start trace
long int left_node;
long int right_node;
if(segment_tree[check_idx] == 0){
while(check_idx < seg_tree_size/2){
left_node = check_idx << 1;
right_node = left_node + 1;
if(segment_tree[right_node] > 0){
length += segment_tree[right_node];
check_idx <<= 1;
}else{
check_idx = check_idx*2 + 1;
}
}
}
}
pos[idx] = length;
}
return;
}
//cpu part
void CountPosition(const char *text, int *pos, int text_size)
{
long long int seg_tree_size = SegmentTreeSize(text_size);
long long int pos_shifted = seg_tree_size/2;
long long int to_build_siblings_size = pos_shifted;
int *d_segment_tree;
hipMalloc(&d_segment_tree, seg_tree_size*sizeof(int));
int blk_size = 256;
while(pos_shifted > 0){
//do __global__ set segment tree
long long int grid_size = CeilDiv(to_build_siblings_size, blk_size);
dim3 BLK_SIZE(blk_size, 1, 1);
dim3 GRID_SIZE(grid_size, 1, 1);
if(pos_shifted == seg_tree_size/2){
hipLaunchKernelGGL(( buildSegTree), dim3(GRID_SIZE), dim3(BLK_SIZE), 0, 0, pos_shifted, d_segment_tree, text, text_size);
}else{
hipLaunchKernelGGL(( buildSegTree), dim3(GRID_SIZE), dim3(BLK_SIZE), 0, 0, pos_shifted, d_segment_tree);
}
//update to parent for constructing parents
pos_shifted = pos_shifted/2;
to_build_siblings_size = pos_shifted;
//sync device
hipDeviceSynchronize();
}
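// The loop above builds the segment tree bottom-up: the first pass
// (pos_shifted == seg_tree_size/2) fills the leaves from the text, and every
// later pass combines pairs of children into their parents until the root at
// index 1 is written.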
//count position
int grid_size = CeilDiv(text_size, blk_size);
dim3 BLK_SIZE(blk_size, 1, 1);
dim3 GRID_SIZE(grid_size, 1, 1);
hipLaunchKernelGGL(( d_countPosition), dim3(GRID_SIZE), dim3(BLK_SIZE), 0, 0, pos, d_segment_tree, text_size, seg_tree_size);
//free memory
hipFree(d_segment_tree);
return;
}
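/* Semantics of CountPosition: text and pos are device pointers (they are passed
   straight to the kernels above); on return pos[i] is the 1-based position of
   character i inside its word, and 0 for newline characters. Small example,
   with '\n' as the only separator handled by the kernels:

       text = "ab\ncd"  ->  pos = {1, 2, 0, 1, 2}
*/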
struct filter_trans{
__host__ __device__ bool operator()(const int &pos){
return pos == 1;
}
};
struct is_head_trans{
__host__ __device__ int operator()(const int &pos, const int &is_head){
return (pos*is_head - 1);
}
};
struct remove_minus_one_trans{
__host__ __device__ bool operator()(const int &pos){
return(pos >= 0);
}
};
int ExtractHead(const int *pos, int *head, int text_size)
{
int *buffer;
int nhead;
hipMalloc(&buffer, sizeof(int)*text_size*2); // this is enough
//use thrust pointer to manipulate thrust algorithms
thrust::device_ptr<const int> pos_d(pos);
thrust::device_ptr<int> head_d(head), flag_d(buffer), cumsum_d(buffer+text_size);
// TODO
// step 1: flag_d[i] = 1 where pos[i] == 1 (a word head), 0 otherwise
thrust::transform(pos_d, pos_d+text_size, flag_d, filter_trans());
// step 2: the number of heads is the number of set flags
nhead = thrust::count(flag_d, flag_d+text_size, 1);
// step 3: fill the second half of the buffer with 1,2,3,... (1-based indices)
thrust::sequence(flag_d+text_size, flag_d+2*text_size, 1);
// step 4: (1-based index)*flag - 1 leaves the 0-based head index where flag==1 and -1 elsewhere
thrust::transform(flag_d+text_size, flag_d+2*text_size, flag_d, flag_d, is_head_trans());
// copy to head_d
// manipulate the address in memory directly
thrust::copy_if(flag_d, flag_d+text_size, head_d, remove_minus_one_trans());
hipFree(buffer);
return nhead;
}
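/* Worked example of the compaction above, for pos = {1,2,0,1,2,3}
   (words "ab" and "cde" separated by one newline):
     flags (pos == 1)        -> {1, 0, 0, 1, 0, 0},  nhead = 2
     1-based sequence        -> {1, 2, 3, 4, 5, 6}
     sequence * flag - 1     -> {0,-1,-1, 3,-1,-1}
     copy_if(value >= 0)     -> head = {0, 3}  (indices of the word heads)
*/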
void Part3(char *text, int *pos, int *head, int text_size, int n_head)
{
}
| 4cd129920bce2cbe42decc91680b23e1e5479da8.cu | #include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/count.h> // thrust::count (ExtractHead)
#include <thrust/sequence.h> // thrust::sequence (ExtractHead)
#include <thrust/copy.h> // thrust::copy_if (ExtractHead)
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
__global__ void buildSegTree(int shifted_pivot, int *segment_tree, const char *text=NULL, int text_size=0){
long long int idx = blockIdx.x*blockDim.x + threadIdx.x;
long long int tree_idx = shifted_pivot + idx;
//leaf
if(text){
int leaf_val = 0;
if(idx < text_size){
//note: '\n' must be a single-quoted character literal, not a double-quoted string
if(text[idx] != '\n'){
leaf_val = 1;
}
}
segment_tree[tree_idx] = leaf_val;
//not leaf
}else{
long long int left_tree_node = 2*tree_idx;
long long int right_tree_node = left_tree_node+1;
if(segment_tree[left_tree_node] == 0 || segment_tree[right_tree_node] == 0){
segment_tree[tree_idx] = 0;
}else{
segment_tree[tree_idx] = segment_tree[left_tree_node] + segment_tree[right_tree_node];
}
}
return;
}
__host__ int SegmentTreeSize(int text_size){
int s = 1;
for(;s<text_size;s<<=1);
return s<<1;
}
//count global
__global__ void d_countPosition(int *pos, int *segment_tree, int text_size, int seg_tree_size){
long int idx = blockIdx.x*blockDim.x + threadIdx.x;
//out of bound
if(idx >= text_size){return;}
long int leaf_shifted = seg_tree_size>>1;
//zero condition
if(segment_tree[leaf_shifted+idx] == 0){
pos[idx] = 0;
return;
}else{
//naive n*k
// int word_posi = 1;
// long long int countdown_pivot = idx - 1;
// while(countdown_pivot >=0 && segment_tree[leaf_shifted+countdown_pivot] != 0){
// word_posi += 1;
// countdown_pivot -= 1;
// }
// pos[idx] = word_posi;
//segment tree approach n*(log k)
//check node is even or odd
//even start node should move to prev odd
int length = 1;
int backtrace_id = idx;
if(backtrace_id %2!= 0){
backtrace_id -= 1;
if(segment_tree[leaf_shifted + backtrace_id] == 0){
pos[idx] = length;
return;
}else{
length += 1;
}
}
//start up trace
int max_up_trace = 512;
int loop_iv = 2;
long int check_idx = (leaf_shifted + backtrace_id)/2;
leaf_shifted >>= 1;
do{
if(check_idx % 2!= 0){
if( segment_tree[check_idx -1]>=loop_iv){
length += loop_iv;
}else{
break;
}
}else if(check_idx %2 == 0 && check_idx == leaf_shifted){
break;
}
check_idx >>= 1;
loop_iv <<= 1;
leaf_shifted >>= 1;
}while(loop_iv <= max_up_trace);
//down trace if check_idx = 0
if(segment_tree[check_idx/2] == 0 && !(check_idx == leaf_shifted && segment_tree[check_idx] > 0)){
//move down one sibling
check_idx -= 1;
//start trace
long int left_node;
long int right_node;
if(segment_tree[check_idx] == 0){
while(check_idx < seg_tree_size/2){
left_node = check_idx << 1;
right_node = left_node + 1;
if(segment_tree[right_node] > 0){
length += segment_tree[right_node];
check_idx <<= 1;
}else{
check_idx = check_idx*2 + 1;
}
}
}
}
pos[idx] = length;
}
return;
}
//cpu part
void CountPosition(const char *text, int *pos, int text_size)
{
long long int seg_tree_size = SegmentTreeSize(text_size);
long long int pos_shifted = seg_tree_size/2;
long long int to_build_siblings_size = pos_shifted;
int *d_segment_tree;
cudaMalloc(&d_segment_tree, seg_tree_size*sizeof(int));
int blk_size = 256;
while(pos_shifted > 0){
//do __global__ set segment tree
long long int grid_size = CeilDiv(to_build_siblings_size, blk_size);
dim3 BLK_SIZE(blk_size, 1, 1);
dim3 GRID_SIZE(grid_size, 1, 1);
if(pos_shifted == seg_tree_size/2){
buildSegTree<<<GRID_SIZE, BLK_SIZE>>>(pos_shifted, d_segment_tree, text, text_size);
}else{
buildSegTree<<<GRID_SIZE, BLK_SIZE>>>(pos_shifted, d_segment_tree);
}
//update to parent for constructing parents
pos_shifted = pos_shifted/2;
to_build_siblings_size = pos_shifted;
//sync device
cudaDeviceSynchronize();
}
//count position
int grid_size = CeilDiv(text_size, blk_size);
dim3 BLK_SIZE(blk_size, 1, 1);
dim3 GRID_SIZE(grid_size, 1, 1);
d_countPosition<<<GRID_SIZE, BLK_SIZE>>>(pos, d_segment_tree, text_size, seg_tree_size);
//free memory
cudaFree(d_segment_tree);
return;
}
struct filter_trans{
__host__ __device__ bool operator()(const int &pos){
return pos == 1;
}
};
struct is_head_trans{
__host__ __device__ int operator()(const int &pos, const int &is_head){
return (pos*is_head - 1);
}
};
struct remove_minus_one_trans{
__host__ __device__ bool operator()(const int &pos){
return(pos >= 0);
}
};
int ExtractHead(const int *pos, int *head, int text_size)
{
int *buffer;
int nhead;
cudaMalloc(&buffer, sizeof(int)*text_size*2); // this is enough
//use thrust pointer to manipulate thrust algorithms
thrust::device_ptr<const int> pos_d(pos);
thrust::device_ptr<int> head_d(head), flag_d(buffer), cumsum_d(buffer+text_size);
// TODO
// step 1: flag_d[i] = 1 where pos[i] == 1 (a word head), 0 otherwise
thrust::transform(pos_d, pos_d+text_size, flag_d, filter_trans());
// step 2: the number of heads is the number of set flags
nhead = thrust::count(flag_d, flag_d+text_size, 1);
// step 3: fill the second half of the buffer with 1,2,3,... (1-based indices)
thrust::sequence(flag_d+text_size, flag_d+2*text_size, 1);
// step 4: (1-based index)*flag - 1 leaves the 0-based head index where flag==1 and -1 elsewhere
thrust::transform(flag_d+text_size, flag_d+2*text_size, flag_d, flag_d, is_head_trans());
// copy to head_d
// manipulate the address in memory directly
thrust::copy_if(flag_d, flag_d+text_size, head_d, remove_minus_one_trans());
cudaFree(buffer);
return nhead;
}
void Part3(char *text, int *pos, int *head, int text_size, int n_head)
{
}
|
32f563f37635b52f0032bccef8c49b6d53160ac3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/Pool.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <c10/macros/Macros.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/max_pool3d_with_indices_native.h>
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
#include <ATen/ops/zeros_like.h>
#endif
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_single_out_frame(
scalar_t* inputData,
PackedTensorAccessor64<scalar_t, 4> output,
PackedTensorAccessor64<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
int64_t slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
// For int64_t data type, see https://github.com/pytorch/pytorch/issues/52822
if (oRow < output.size(2) && oColumn < output.size(3))
{
int tStart = oFrame * dT - pT;
int hStart = oRow * dH - pH;
int wStart = oColumn * dW - pW;
int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime);
int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight);
int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth);
while(tStart < 0)
tStart += dilationT;
while(hStart < 0)
hStart += dilationH;
while(wStart < 0)
wStart += dilationW;
int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart;
inputData += slice * itime * iheight * iwidth;
scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity
for (int t = tStart; t < tEnd; t += dilationT)
{
for (int h = hStart; h < hEnd; h += dilationH)
{
for (int w = wStart; w < wEnd; w += dilationW)
{
int index = t * iheight * iwidth + h * iwidth + w;
scalar_t val = inputData[index];
if ((max < val) || at::_isnan(val))
{
max = val;
maxIndex = index;
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
indices[slice][oFrame][oRow][oColumn] = maxIndex;
}
}
template <typename scalar_t>
void max_pool3d_with_indices_out_frame(
scalar_t* input_data,
const Tensor& output,
const Tensor& indices,
int totalZ,
int itime, int iheight, int iwidth,
int otime, int oheight, int owidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
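    // gridDim.z is limited to 65535, so the collapsed time*feature*batch extent is
    // processed in chunks of at most 65535 slices per kernel launch.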
dim3 grid(ceil_div(owidth, static_cast<int>(block.x)),
ceil_div(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_data,
output.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
itime, iheight, iwidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_backward_single_out_frame(
scalar_t *gradInputData,
PackedTensorAccessor64<scalar_t, 4> gradOutput,
PackedTensorAccessor64<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature
if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3))
{
int maxIndex = indices[slice][oFrame][oRow][oColumn];
if (maxIndex != -1) {
gpuAtomicAddNoReturn(&gradInputData[slice * itime * iheight * iwidth + maxIndex],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
}
template <typename scalar_t>
void max_pool3d_with_indices_backward_out_frame(
scalar_t *gradInputData,
const Tensor& gradOutput,
const Tensor& indices,
int64_t totalZ,
int itime, int iheight, int iwidth,
int oheight, int owidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH)
{
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(ceil_div(owidth, static_cast<int>(block.x)),
ceil_div(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_pool3d_with_indices_backward_single_out_frame)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInputData,
gradOutput.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
itime, iheight, iwidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH,
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
void max_pool3d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input, "input", 3 };
checkAllSameGPU(__func__,
{output_arg, indices_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"max_pool3d: kernel_size must either be a single int, or a tuple of three ints")
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3,
"max_pool3d: stride must either be omitted, a single int, or a tuple of three ints")
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"max_pool3d: padding must be either be a single int, or a tuple of three ints");
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3,
"max_pool3d: dilation must be either a single int, or a tuple of three ints");
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t itime = input.size(-3);
const int64_t iheight = input.size(-2);
const int64_t iwidth = input.size(-1);
const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode);
const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode);
const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode);
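  // Each output extent follows the usual pooling arithmetic, roughly
  //   o = floor((i + 2 * pad - dilation * (kernel - 1) - 1) / stride) + 1,
  // with the division rounded up instead of down when ceil_mode is set.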
pool3d_shape_check(
input,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth,
"max_pool3d_with_indices_out_cuda_template()");
if (input.ndimension() == 4) {
output.resize_({ nslices, otime, oheight, owidth});
indices.resize_({nslices, otime, oheight, owidth});
}
else {
output.resize_({nbatch, nslices, otime, oheight, owidth});
indices.resize_({nbatch, nslices, otime, oheight, owidth});
}
if (input.numel() == 0) {
return;
}
Tensor work_input = input.contiguous();
Tensor work_output = output;
Tensor work_indices = indices;
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(),
"max_pool3d_with_indices_out_frame",
[&]{
scalar_t *input_data = work_input.data_ptr<scalar_t>();
int64_t totalZ = otime * nslices * nbatch;
max_pool3d_with_indices_out_frame(
input_data, work_output, work_indices,
totalZ,
itime, iheight, iwidth,
otime, oheight, owidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
}
);
}
void max_pool3d_with_indices_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
const Tensor& indices,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 };
TensorArg input_arg{ input, "input", 3 };
TensorArg indices_arg{ indices, "indices", 4 };
checkAllSameGPU(__func__,
{gradInput_arg, gradOutput_arg, input_arg, indices_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"max_pool3d: kernel_size must either be a single int, or a tuple of three ints")
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3,
"max_pool3d: stride must either be omitted, a single int, or a tuple of three ints")
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"max_pool3d: padding must be either be a single int, or a tuple of three ints");
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3,
"max_pool3d: dilation must be either a single int, or a tuple of three ints");
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"max_pool2d_with_indices_backward_out_cuda_template(): ",
"Expected 4D or 5D input tensor, but got ", input.sizes());
TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5),
"max_pool2d_with_indices_backward_out_cuda_template(): ",
"Expected 4D or 5D gradOutput tensor, but got ", gradOutput.sizes());
// Resize and initialize result tensor.
gradInput.resize_as_(input);
gradInput.zero_();
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t otime = gradOutput.size(-3);
const int64_t oheight = gradOutput.size(-2);
const int64_t owidth = gradOutput.size(-1);
const int64_t itime = gradInput.size(-3);
const int64_t iheight = gradInput.size(-2);
const int64_t iwidth = gradInput.size(-1);
max_pool3d_backward_shape_check(
input,
gradOutput,
indices,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth,
"max_pool3d_with_indices_backward_out_cuda_template()");
if (gradOutput.numel() == 0) {
return;
}
Tensor work_grad_input = gradInput;
Tensor work_grad_output = gradOutput.contiguous();
Tensor work_indices = indices.contiguous();
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"max_pool3d_with_indices_backward_out_frame",
[&] {
const int64_t totalZ = otime * nslices * nbatch;
scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>();
max_pool3d_with_indices_backward_out_frame(
grad_input_data, work_grad_output, work_indices,
totalZ,
itime, iheight, iwidth,
oheight, owidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH);
}
);
}
} // namespace
std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
Tensor& output,
Tensor& indices)
{
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor&, Tensor&>(output, indices);
}
std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
NoNamesGuard guard;
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
guard.reset();
namedinference::propagate_names(output, input);
namedinference::propagate_names(indices, input);
return std::tuple<Tensor, Tensor>(output, indices);
}
Tensor& max_pool3d_with_indices_backward_out_cuda(const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices,
Tensor& gradInput)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda");
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
Tensor max_pool3d_with_indices_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda");
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
} // at::native
} // at
| 32f563f37635b52f0032bccef8c49b6d53160ac3.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/NumericUtils.h>
#include <ATen/native/Pool.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/macros/Macros.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/max_pool3d_with_indices_native.h>
#include <ATen/ops/max_pool3d_with_indices_backward_native.h>
#include <ATen/ops/zeros_like.h>
#endif
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_single_out_frame(
scalar_t* inputData,
PackedTensorAccessor64<scalar_t, 4> output,
PackedTensorAccessor64<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
int64_t slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
// For int64_t data type, see https://github.com/pytorch/pytorch/issues/52822
if (oRow < output.size(2) && oColumn < output.size(3))
{
int tStart = oFrame * dT - pT;
int hStart = oRow * dH - pH;
int wStart = oColumn * dW - pW;
int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime);
int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight);
int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth);
while(tStart < 0)
tStart += dilationT;
while(hStart < 0)
hStart += dilationH;
while(wStart < 0)
wStart += dilationW;
int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart;
inputData += slice * itime * iheight * iwidth;
scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity
for (int t = tStart; t < tEnd; t += dilationT)
{
for (int h = hStart; h < hEnd; h += dilationH)
{
for (int w = wStart; w < wEnd; w += dilationW)
{
int index = t * iheight * iwidth + h * iwidth + w;
scalar_t val = inputData[index];
if ((max < val) || at::_isnan(val))
{
max = val;
maxIndex = index;
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
indices[slice][oFrame][oRow][oColumn] = maxIndex;
}
}
template <typename scalar_t>
void max_pool3d_with_indices_out_frame(
scalar_t* input_data,
const Tensor& output,
const Tensor& indices,
int totalZ,
int itime, int iheight, int iwidth,
int otime, int oheight, int owidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
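    // gridDim.z is limited to 65535, so the collapsed time*feature*batch extent is
    // processed in chunks of at most 65535 slices per kernel launch.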
dim3 grid(ceil_div(owidth, static_cast<int>(block.x)),
ceil_div(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_pool3d_with_indices_single_out_frame
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
input_data,
output.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
itime, iheight, iwidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_backward_single_out_frame(
scalar_t *gradInputData,
PackedTensorAccessor64<scalar_t, 4> gradOutput,
PackedTensorAccessor64<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature
if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3))
{
int maxIndex = indices[slice][oFrame][oRow][oColumn];
if (maxIndex != -1) {
gpuAtomicAddNoReturn(&gradInputData[slice * itime * iheight * iwidth + maxIndex],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
}
template <typename scalar_t>
void max_pool3d_with_indices_backward_out_frame(
scalar_t *gradInputData,
const Tensor& gradOutput,
const Tensor& indices,
int64_t totalZ,
int itime, int iheight, int iwidth,
int oheight, int owidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH)
{
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(ceil_div(owidth, static_cast<int>(block.x)),
ceil_div(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_pool3d_with_indices_backward_single_out_frame
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
gradInputData,
gradOutput.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
itime, iheight, iwidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH,
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
void max_pool3d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input, "input", 3 };
checkAllSameGPU(__func__,
{output_arg, indices_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"max_pool3d: kernel_size must either be a single int, or a tuple of three ints")
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3,
"max_pool3d: stride must either be omitted, a single int, or a tuple of three ints")
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"max_pool3d: padding must be either be a single int, or a tuple of three ints");
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3,
"max_pool3d: dilation must be either a single int, or a tuple of three ints");
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t itime = input.size(-3);
const int64_t iheight = input.size(-2);
const int64_t iwidth = input.size(-1);
const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode);
const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode);
const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode);
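  // Each output extent follows the usual pooling arithmetic, roughly
  //   o = floor((i + 2 * pad - dilation * (kernel - 1) - 1) / stride) + 1,
  // with the division rounded up instead of down when ceil_mode is set.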
pool3d_shape_check(
input,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth,
"max_pool3d_with_indices_out_cuda_template()");
if (input.ndimension() == 4) {
output.resize_({ nslices, otime, oheight, owidth});
indices.resize_({nslices, otime, oheight, owidth});
}
else {
output.resize_({nbatch, nslices, otime, oheight, owidth});
indices.resize_({nbatch, nslices, otime, oheight, owidth});
}
if (input.numel() == 0) {
return;
}
Tensor work_input = input.contiguous();
Tensor work_output = output;
Tensor work_indices = indices;
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(),
"max_pool3d_with_indices_out_frame",
[&]{
scalar_t *input_data = work_input.data_ptr<scalar_t>();
int64_t totalZ = otime * nslices * nbatch;
max_pool3d_with_indices_out_frame(
input_data, work_output, work_indices,
totalZ,
itime, iheight, iwidth,
otime, oheight, owidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
}
);
}
void max_pool3d_with_indices_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
const Tensor& indices,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 };
TensorArg input_arg{ input, "input", 3 };
TensorArg indices_arg{ indices, "indices", 4 };
checkAllSameGPU(__func__,
{gradInput_arg, gradOutput_arg, input_arg, indices_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3,
"max_pool3d: kernel_size must either be a single int, or a tuple of three ints")
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3,
"max_pool3d: stride must either be omitted, a single int, or a tuple of three ints")
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 3,
"max_pool3d: padding must be either be a single int, or a tuple of three ints");
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3,
"max_pool3d: dilation must be either a single int, or a tuple of three ints");
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"max_pool2d_with_indices_backward_out_cuda_template(): ",
"Expected 4D or 5D input tensor, but got ", input.sizes());
TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5),
"max_pool2d_with_indices_backward_out_cuda_template(): ",
"Expected 4D or 5D gradOutput tensor, but got ", gradOutput.sizes());
// Resize and initialize result tensor.
gradInput.resize_as_(input);
gradInput.zero_();
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t otime = gradOutput.size(-3);
const int64_t oheight = gradOutput.size(-2);
const int64_t owidth = gradOutput.size(-1);
const int64_t itime = gradInput.size(-3);
const int64_t iheight = gradInput.size(-2);
const int64_t iwidth = gradInput.size(-1);
max_pool3d_backward_shape_check(
input,
gradOutput,
indices,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth,
"max_pool3d_with_indices_backward_out_cuda_template()");
if (gradOutput.numel() == 0) {
return;
}
Tensor work_grad_input = gradInput;
Tensor work_grad_output = gradOutput.contiguous();
Tensor work_indices = indices.contiguous();
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"max_pool3d_with_indices_backward_out_frame",
[&] {
const int64_t totalZ = otime * nslices * nbatch;
scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>();
max_pool3d_with_indices_backward_out_frame(
grad_input_data, work_grad_output, work_indices,
totalZ,
itime, iheight, iwidth,
oheight, owidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH);
}
);
}
} // namespace
std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
Tensor& output,
Tensor& indices)
{
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor&, Tensor&>(output, indices);
}
std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
NoNamesGuard guard;
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
guard.reset();
namedinference::propagate_names(output, input);
namedinference::propagate_names(indices, input);
return std::tuple<Tensor, Tensor>(output, indices);
}
Tensor& max_pool3d_with_indices_backward_out_cuda(const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices,
Tensor& gradInput)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda");
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
Tensor max_pool3d_with_indices_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda");
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
} // at::native
} // at
|
3d707a65754b7afbbca5a7079bb42bbfe90b75af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include <limits>
#include "minimizer.hpp"
namespace claragenomics
{
namespace cudamapper
{
Minimizer::Minimizer(representation_t representation, position_in_read_t position_in_read, DirectionOfRepresentation direction, read_id_t read_id)
: representation_(representation)
, position_in_read_(position_in_read)
, direction_(direction)
, read_id_(read_id)
{
}
representation_t Minimizer::representation() const
{
return representation_;
}
position_in_read_t Minimizer::position_in_read() const
{
return position_in_read_;
}
read_id_t Minimizer::read_id() const
{
return read_id_;
}
/// \brief Apply a hash function to a representation
///
/// Because of the non-Poisson distribution of DNA, some common sequences with common kmer-content (e.g. long poly-A runs)
/// may be over-represented in sketches. By applying a hash function, kmers are mapped to representations over
/// a more uniform space. The hash function implemented here was developed by Thomas Wang and is described
/// [here](https://gist.github.com/badboy/6267743). A mask is applied to the output so that all representations are mapped
/// to a 32 bit space.
///
/// \param key the input representation
__device__ representation_t wang_hash64(representation_t key)
{
uint64_t mask = (uint64_t(1) << 32) - 1;
key = (~key + (key << 21)) & mask;
key = key ^ key >> 24;
key = ((key + (key << 3)) + (key << 8)) & mask;
key = key ^ key >> 14;
key = ((key + (key << 2)) + (key << 4)) & mask;
key = key ^ key >> 28;
key = (key + (key << 31)) & mask;
return key;
}
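// The hash is plain 64-bit integer arithmetic, so the same computation can be mirrored on the
// host for unit tests or precomputation. The helper below is a hypothetical sketch, not part of
// the original file:
//
// inline representation_t wang_hash64_host(representation_t key)
// {
//     const uint64_t mask = (uint64_t(1) << 32) - 1;
//     key = (~key + (key << 21)) & mask;
//     key = key ^ key >> 24;
//     key = ((key + (key << 3)) + (key << 8)) & mask;
//     key = key ^ key >> 14;
//     key = ((key + (key << 2)) + (key << 4)) & mask;
//     key = key ^ key >> 28;
//     key = (key + (key << 31)) & mask;
//     return key;
// }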
Minimizer::DirectionOfRepresentation Minimizer::direction() const
{
return direction_;
}
/// \brief finds front end minimizers
///
/// Finds the minimizers of windows starting at position 0 and having window sizes ranging from 1 to window_size-1
///
/// \param minimizer_size kmer length
/// \param window_size number of kmers in one central minimizer window, kmers being shifted by one basepair each (for front end minimizers window size actually varies from 1 to window_size-1)
/// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on
/// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read
/// \param window_minimizers_representation output array of representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_windows_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially zero)
__global__ void find_front_end_minimizers(const std::uint64_t minimizer_size,
const std::uint64_t window_size,
const char* const basepairs,
const ArrayBlock* const read_id_to_basepairs_section,
representation_t* const window_minimizers_representation,
char* const window_minimizers_direction,
position_in_read_t* const window_minimizers_position_in_read,
const ArrayBlock* const read_id_to_windows_section,
std::uint32_t* const read_id_to_minimizers_written,
const bool hash_representations)
{
// TODO: simplify this method similarly to find_back_end_minimizers
if (1 == window_size)
{
        // if 1 == window_size there are no end minimizers
return;
}
const auto input_array_first_element = read_id_to_basepairs_section[blockIdx.x].first_element_;
const auto output_arrays_offset = read_id_to_windows_section[blockIdx.x].first_element_;
// Dynamically allocating shared memory and assigning parts of it to different pointers
    // Everything is 8-byte aligned
extern __shared__ std::uint64_t sm[];
// TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
// TODO: use sizeof to get the number of bytes
std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x elements
shared_memory_64_bit_elements_already_taken += (blockDim.x + 7) / 8;
char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x elements
shared_memory_64_bit_elements_already_taken += (blockDim.x + 7) / 8;
representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements
shared_memory_64_bit_elements_already_taken += blockDim.x - (minimizer_size - 1);
char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements; 0 - forward, 1 - reverse
shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 7) / 8;
position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]);
shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 1) / 2;
position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements; 0 - same, 1 - different
shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 1) / 2;
representation_t* minimizer_representation_of_largest_window_from_previous_step = (&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += 1;
position_in_read_t* minimizer_position_in_read_of_largest_window_from_previous_step = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
// local index = index in section of the output array dedicated to this read
position_in_read_t* local_index_to_write_next_minimizer_to = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
// TODO: Move to constant memory
char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements
if (0 == threadIdx.x)
{
forward_to_reverse_complement[0b000] = 0b0000;
forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
forward_to_reverse_complement[0b010] = 0b0000;
forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
forward_to_reverse_complement[0b101] = 0b0000;
forward_to_reverse_complement[0b110] = 0b0000;
forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
}
__syncthreads();
// Each thread loads one basepair, making it blockDim.x basepairs. Each kmer has minimizer_size elements
// Number of windows is equal to the number of kmers for end minimizer
    // This means a total of blockDim.x - (minimizer_size - 1) kmers can be processed in one block, where each kmer is shifted by one basepair compared to the previous kmer
// For blockDim.x = 6 and minimizer_size = 3 there are 6 - (3 - 1) = 4 kmers
// 0 1 2
// 1 2 3
// 2 3 4
// 3 4 5
// If more minimizers have to be processed a new step is needed, in this case meaning
// 4 5 6
// 5 6 7
// 6 7 8
// 7 8 9
    // This means that a number of basepairs is loaded twice, but this is a tradeoff for less complex code
const std::uint16_t windows_per_loop_step = blockDim.x - (minimizer_size - 1);
*minimizer_representation_of_largest_window_from_previous_step = 0;
*minimizer_position_in_read_of_largest_window_from_previous_step = 0;
*local_index_to_write_next_minimizer_to = 0;
for (std::uint32_t first_element_in_step = 0; first_element_in_step < window_size - 1; first_element_in_step += windows_per_loop_step)
{
// load basepairs into shared memory and calculate the lexical ordering hash
if (first_element_in_step + threadIdx.x < window_size - 1 + minimizer_size - 1)
{ // window_size - 1 + minimizer_size - 1 -> total number of basepairs needed for all front minimizers
const char bp = basepairs[input_array_first_element + first_element_in_step + threadIdx.x];
forward_basepair_hashes[threadIdx.x] = 0b11 & (bp >> 2 ^ bp >> 1);
reverse_basepair_hashes[threadIdx.x] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
}
__syncthreads();
        // The first front end window covers only one minimizer (the one starting at position 0), the second covers the minimizers starting at 0 and 1, and so on until the window which covers window_size-1 minimizers
// For window_size = 7 and minimize_size = 3 this means:
// window 0: 0 1 2 (0 1 2)
// window 1: 0 1 2 3 (0 1 2; 1 2 3)
        // window 2: 0 1 2 3 4 (0 1 2; 1 2 3; 2 3 4)
// window 3: 0 1 2 3 4 5 (0 1 2; 1 2 3; 2 3 4; 3 4 5)
// window 4: 0 1 2 3 4 5 6 (0 1 2; 1 2 3; 2 3 4; 3 4 5; 4 5 6)
// window 5: 0 1 2 3 4 5 6 7 (0 1 2; 1 2 3; 2 3 4; 3 4 5; 4 5 6; 5 6 7)
// If window_size > windows_per_loop_step the other windows have to be processed in other loop steps
// For example, for blockDim.x = 6, minimizer_size = 3 (=> windows_per_loop_step = 4) and window_size = 7:
// step 0 (first_element_in_step = 0):
// window 0: 0 1 2 (0 1 2)
// window 1: 0 1 2 3 (0 1 2; 1 2 3)
        // window 2: 0 1 2 3 4 (0 1 2; 1 2 3; 2 3 4)
// window 3: 0 1 2 3 4 5 (0 1 2; 1 2 3; 2 3 4; 3 4 5)
// step 1 (first_element_in_step = 4):
// window 4: 0 1 2 3 4 5 6 (take results for window 3 and add: 4 5 6)
// window 5: 0 1 2 3 4 5 6 7 (take results for window 3 and add: 4 5 6; 5 6 7)
        // This means that a thread has a window assigned to it when threadIdx.x < windows_per_loop_step (for all loops other than the last one) and
// when first_element_in_step + threadIdx.x < window_size - 1
const bool thread_assigned_to_a_window = first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step;
// calculate minimizer for each kmer in front end windows
if (thread_assigned_to_a_window)
        { // largest front minimizer window starts at basepair 0 and goes up to window_size -1
representation_t forward_representation = 0;
representation_t reverse_representation = 0;
// TODO: It's not necessary to fully build both representations in order to determine which one is smaller. In most cases there is going to be a difference already at the first element
for (std::uint16_t i = 0; i < minimizer_size; ++i)
{
forward_representation |= forward_basepair_hashes[threadIdx.x + i] << 2 * (minimizer_size - i - 1);
reverse_representation |= reverse_basepair_hashes[threadIdx.x + i] << 2 * i;
}
if (hash_representations)
{
forward_representation = wang_hash64(forward_representation);
reverse_representation = wang_hash64(reverse_representation);
}
if (forward_representation <= reverse_representation)
{
minimizers_representation[threadIdx.x] = forward_representation;
minimizers_direction[threadIdx.x] = 0;
}
else
{
minimizers_representation[threadIdx.x] = reverse_representation;
minimizers_direction[threadIdx.x] = 1;
}
}
__syncthreads();
representation_t window_minimizer_representation = 0;
position_in_read_t window_minimizer_position_in_read = 0;
// calculate minimizer for each window
        // Start with the value of the first minimizer and iteratively compare it with the other minimizers in the window
        // If first_element_in_step != 0 there is no need to go through all minimizers in the window. One can take the minimizer of window first_element_in_step-1
        // as the current window would check exactly the same minimizers before checking minimizer first_element_in_step
if (thread_assigned_to_a_window)
{
if (first_element_in_step != 0)
{
window_minimizer_representation = *minimizer_representation_of_largest_window_from_previous_step;
window_minimizer_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step;
if (minimizers_representation[0] <= window_minimizer_representation)
{
window_minimizer_representation = minimizers_representation[0];
window_minimizer_position_in_read = first_element_in_step;
}
}
else
{
window_minimizer_representation = minimizers_representation[0];
window_minimizer_position_in_read = 0;
}
// All threads have to wait for the largest block to finish. Probably no better solution without big restructuring
// If there are several minimizers with the same representation only save the latest one (thus <=), others will be covered by smaller windows
for (std::uint16_t i = 1; i <= threadIdx.x; ++i)
{
if (minimizers_representation[i] <= window_minimizer_representation)
{
window_minimizer_representation = minimizers_representation[i];
window_minimizer_position_in_read = first_element_in_step + i;
}
}
minimizers_position_in_read[threadIdx.x] = window_minimizer_position_in_read;
}
__syncthreads();
        // only write the first occurrence of each minimizer to the output array
        // Hash of the last kmer in a window can be a minimizer only if it is smaller than or equal to the minimizer of the previous window
        // That means that the minimizer of the current window should only be written if it is different from the one of the previous window
        // Otherwise it is the same minimizer and there is no need to write to the output array
        // Imagine that the hash representations of the windows are as follows (the number in parentheses marks the position of the last occurrence of the minimizer with that representation):
// 8, 89, 898, 8987, 89878, 898785, 8987856, 89878562
// Minimizers of these windows are
// 8(0) 8(0) 8(2) 7(3) 7(3) 5(5) 5(5) 2(7)
        // If we use 1 to indicate the first occurrence of a minimizer and 0 for a repetition we get
// 1 0 1 1 0 1 0 1
        // If we do an inclusive scan on this array we get the indices to which the unique minimizers should be written (plus one)
// 1 1 2 3 3 4 4 5
        // From this it's clear that only a window whose value is larger than its left neighbor's should write its minimizer, and it should write it to the element with index value-1
if (first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step)
{
if (0 == first_element_in_step && 0 == threadIdx.x)
{
// minimizer of first window is unique for sure as it has no left neighbor
different_minimizer_than_neighbors[0] = 1;
}
else
{
representation_t neighbors_minimizers_position_in_read = 0;
// find left neighbor's window minimizer's position in read
if (0 == threadIdx.x)
{
neighbors_minimizers_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step;
}
else
{
// TODO: consider using warp shuffle instead of shared memory
neighbors_minimizers_position_in_read = minimizers_position_in_read[threadIdx.x - 1];
}
// check if it's the same minimizer
if (neighbors_minimizers_position_in_read == minimizers_position_in_read[threadIdx.x])
{
different_minimizer_than_neighbors[threadIdx.x] = 0;
}
else
{
different_minimizer_than_neighbors[threadIdx.x] = 1;
}
}
}
__syncthreads();
// if there are more loop steps to follow write the value and position of minimizer of the largest window
if (first_element_in_step + windows_per_loop_step < window_size - 1 && threadIdx.x == windows_per_loop_step - 1)
{
*minimizer_representation_of_largest_window_from_previous_step = window_minimizer_representation;
*minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizer_position_in_read;
}
// no need to sync, these two values are not used before the next sync
// perform inclusive scan
        // different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
// TODO: implement it using warp shuffle or use CUB
if (0 == threadIdx.x)
{
std::uint16_t i = 0;
different_minimizer_than_neighbors[i] += *local_index_to_write_next_minimizer_to;
for (i = 1; i < blockDim.x - (minimizer_size - 1); ++i)
{
different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
}
}
__syncthreads();
// now save minimizers to output array
if (first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step)
{
const std::uint32_t neighbors_write_index = 0 == threadIdx.x ? *local_index_to_write_next_minimizer_to : different_minimizer_than_neighbors[threadIdx.x - 1];
if (neighbors_write_index < different_minimizer_than_neighbors[threadIdx.x])
{
const std::uint64_t output_index = output_arrays_offset + different_minimizer_than_neighbors[threadIdx.x] - 1;
window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[threadIdx.x] - first_element_in_step];
window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[threadIdx.x] - first_element_in_step];
window_minimizers_position_in_read[output_index] = minimizers_position_in_read[threadIdx.x];
}
}
__syncthreads();
// index (plus one) to which the last window minimizer was written is the number of all unique front end window minimizers
if (first_element_in_step + threadIdx.x == window_size - 1 - 1)
{
// "plus one" is already included in different_minimizer_than_neighbors as it was created by an inclusive scan
read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[threadIdx.x];
}
// if there are more loop steps to follow write the output array index of last minimizer in this loop step
if (first_element_in_step + windows_per_loop_step <= window_size - 1 && threadIdx.x == windows_per_loop_step - 1)
{
*local_index_to_write_next_minimizer_to = different_minimizer_than_neighbors[threadIdx.x];
}
}
}
/// \brief finds central minimizers
///
/// Finds the minimizers of windows of size window_size starting at position 0 and moving by one basepair at a time
///
/// \param minimizer_size kmer length
/// \param window_size number of kmers in one window, kmers being shifted by one basepair each
/// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on
/// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read
/// \param window_minimizers_representation output array of representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_windows_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially number of front end minimizers)
__global__ void find_central_minimizers(const std::uint64_t minimizer_size,
const std::uint64_t window_size,
const std::uint32_t basepairs_per_thread,
const char* const basepairs,
const ArrayBlock* const read_id_to_basepairs_section,
representation_t* const window_minimizers_representation,
char* const window_minimizers_direction,
position_in_read_t* const window_minimizers_position_in_read,
const ArrayBlock* const read_id_to_windows_section,
std::uint32_t* const read_id_to_minimizers_written,
const bool hash_representations)
{
// See find_front_end_minimizers for more details about the algorithm
const std::uint64_t index_of_first_element_to_process_global = read_id_to_basepairs_section[blockIdx.x].first_element_;
    // Index of the element to which the first central minimizer of this read should be written. Index refers to the positions within the whole array dedicated to all reads
const std::uint64_t output_index_to_write_the_first_minimizer_global = read_id_to_windows_section[blockIdx.x].first_element_ + read_id_to_minimizers_written[blockIdx.x];
const std::uint32_t basepairs_in_read = read_id_to_basepairs_section[blockIdx.x].block_size_;
const std::uint32_t kmers_in_read = basepairs_in_read - (minimizer_size - 1);
const std::uint32_t windows_in_read = kmers_in_read - (window_size - 1);
const std::uint16_t basepairs_per_loop_step = blockDim.x * basepairs_per_thread;
const std::uint16_t kmers_per_loop_step = basepairs_per_loop_step - (minimizer_size - 1);
const std::uint16_t windows_per_loop_step = kmers_per_loop_step - (window_size - 1);
// Dynamically allocating shared memory and assigning parts of it to different pointers
    // Everything is 8-byte aligned
extern __shared__ std::uint64_t sm[];
// TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
// TODO: use sizeof to get the number of bytes
std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_per_loop_step elements
shared_memory_64_bit_elements_already_taken += (basepairs_per_loop_step + 7) / 8;
char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_per_loop_step elements
shared_memory_64_bit_elements_already_taken += (basepairs_per_loop_step + 7) / 8;
representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_loop_step elements
shared_memory_64_bit_elements_already_taken += kmers_per_loop_step;
char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements; 0 - forward, 1 - reverse
shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 7) / 8;
position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]);
shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 1) / 2;
position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements; 0 - same, 1 - different
shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 1) / 2;
position_in_read_t* minimizer_position_in_read_of_largest_window_from_previous_step = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
position_in_read_t* local_index_to_write_next_minimizer_to = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
// TODO: Move to constant memory
char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements
if (0 == threadIdx.x)
{
forward_to_reverse_complement[0b000] = 0b0000;
forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
forward_to_reverse_complement[0b010] = 0b0000;
forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
forward_to_reverse_complement[0b101] = 0b0000;
forward_to_reverse_complement[0b110] = 0b0000;
forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
}
__syncthreads();
// if there are front minimizers take them into account
if (0 != read_id_to_minimizers_written[blockIdx.x])
{
*minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizers_position_in_read[output_index_to_write_the_first_minimizer_global - 1];
*local_index_to_write_next_minimizer_to = read_id_to_minimizers_written[blockIdx.x];
}
else
{
*minimizer_position_in_read_of_largest_window_from_previous_step = 0; // N/A
*local_index_to_write_next_minimizer_to = 0;
}
for (std::uint32_t first_element_in_step = 0; first_element_in_step < windows_in_read; first_element_in_step += windows_per_loop_step)
{
// load basepairs into shared memory and calculate the lexical ordering hash
for (std::uint32_t basepair_index = threadIdx.x; basepair_index < basepairs_per_loop_step && first_element_in_step + basepair_index < basepairs_in_read; basepair_index += blockDim.x)
{
const char bp = basepairs[index_of_first_element_to_process_global + first_element_in_step + basepair_index];
forward_basepair_hashes[basepair_index] = 0b11 & (bp >> 2 ^ bp >> 1);
reverse_basepair_hashes[basepair_index] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
}
__syncthreads();
// calculate kmer minimizers
for (std::uint32_t kmer_index = threadIdx.x; kmer_index < kmers_per_loop_step && first_element_in_step + kmer_index < kmers_in_read; kmer_index += blockDim.x)
{
representation_t forward_representation = 0;
representation_t reverse_representation = 0;
// TODO: It's not necessary to fully build both representations in order to determine which one is smaller. In most cases there is going to be a difference already at the first element
for (std::uint16_t i = 0; i < minimizer_size; ++i)
{
forward_representation |= forward_basepair_hashes[kmer_index + i] << 2 * (minimizer_size - i - 1);
reverse_representation |= reverse_basepair_hashes[kmer_index + i] << 2 * i;
}
if (hash_representations)
{
forward_representation = wang_hash64(forward_representation);
reverse_representation = wang_hash64(reverse_representation);
}
if (forward_representation <= reverse_representation)
{
minimizers_representation[kmer_index] = forward_representation;
minimizers_direction[kmer_index] = 0;
}
else
{
minimizers_representation[kmer_index] = reverse_representation;
minimizers_direction[kmer_index] = 1;
}
}
__syncthreads();
position_in_read_t window_minimizer_position_in_read = 0;
// find window minimizer
for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
{
// assume that the minimizer of the first kmer in step is the window minimizer
representation_t window_minimizer_representation = minimizers_representation[window_index];
window_minimizer_position_in_read = first_element_in_step + window_index;
// now check the minimizers of all other windows
for (std::uint16_t i = 1; i < window_size; ++i)
{
if (minimizers_representation[window_index + i] <= window_minimizer_representation)
{
window_minimizer_representation = minimizers_representation[window_index + i];
window_minimizer_position_in_read = first_element_in_step + window_index + i;
}
}
minimizers_position_in_read[window_index] = window_minimizer_position_in_read;
}
__syncthreads();
// check if the window to the left has the same minimizer
for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
{
// if this is the first window in the read and there were no front end minimizers then this is the first occurrence of this minimizer
if (0 == first_element_in_step + window_index && 0 == read_id_to_minimizers_written[blockIdx.x])
{
different_minimizer_than_neighbors[0] = 1;
}
else
{
representation_t neighbors_minimizers_position_in_read = 0;
// find left neighbor's window minimizer's position in read
if (0 == window_index)
{
neighbors_minimizers_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step;
}
else
{
// TODO: consider using warp shuffle instead of shared memory
neighbors_minimizers_position_in_read = minimizers_position_in_read[window_index - 1];
}
// check if it's the same minimizer
if (neighbors_minimizers_position_in_read == minimizers_position_in_read[window_index])
{
different_minimizer_than_neighbors[window_index] = 0;
}
else
{
different_minimizer_than_neighbors[window_index] = 1;
}
}
}
__syncthreads();
// if there are more loop steps to follow write the position of minimizer of the last window
// "windows_per_loop_step % blockDim.x - 1" determines the thread which processes the last window
if (first_element_in_step + windows_per_loop_step < windows_in_read && threadIdx.x == windows_per_loop_step % blockDim.x - 1)
{
*minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizer_position_in_read;
}
// perform inclusive scan
// different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
// TODO: implement it using warp shuffle or use CUB
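// e.g. "is different" flags 1 0 1 1 0 1 become 1 1 2 3 3 4 after the scan (plus the offset carried over from the previous step);
// a window whose scanned value is larger than its left neighbor's then writes its minimizer to output element value - 1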
if (0 == threadIdx.x)
{
std::uint16_t i = 0;
different_minimizer_than_neighbors[i] += *local_index_to_write_next_minimizer_to;
for (i = 1; i < windows_per_loop_step && first_element_in_step + i < windows_in_read; ++i)
{
different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
}
}
__syncthreads();
// now save minimizers to output array
for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
{
// if first_element_in_step == 0 and window_index == 0 then *local_index_to_write_next_minimizer_to is set to 0 before entering the loop
const std::uint32_t neighbors_write_index = 0 == window_index ? *local_index_to_write_next_minimizer_to : different_minimizer_than_neighbors[window_index - 1];
if (neighbors_write_index < different_minimizer_than_neighbors[window_index])
{
// output array offset added in inclusive sum
const auto output_index = read_id_to_windows_section[blockIdx.x].first_element_ + different_minimizer_than_neighbors[window_index] - 1;
window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[window_index] - first_element_in_step];
window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[window_index] - first_element_in_step];
window_minimizers_position_in_read[output_index] = minimizers_position_in_read[window_index];
}
}
__syncthreads();
// update the number of written minimizers to account for the central minimizers
// the value is set to the inclusive scan value of the last window in the read (which already includes the front end minimizers)
if (first_element_in_step + windows_per_loop_step >= windows_in_read && 0 == threadIdx.x)
{ // only do it when there is not going to be new loop step
read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[windows_in_read - first_element_in_step - 1]; // write the index of the last window
}
// if there are more loop steps to follow write the output array index of the last minimizer in this loop step
if (first_element_in_step + windows_per_loop_step < windows_in_read && 0 == threadIdx.x)
{
*local_index_to_write_next_minimizer_to = different_minimizer_than_neighbors[windows_per_loop_step - 1]; // index of last written minimizer + 1
}
}
}
/// \brief finds back end minimizers
///
/// Finds the minimizers of windows ending at the last basepair and having window sizes ranging from 1 to window_size-1
///
/// \param minimizer_size kmer length
/// \param window_size number of kmers in one central minimizer window, kmers being shifted by one basepair each (for back end minimizers window size actually varies from 1 to window_size-1)
/// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on
/// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read
/// \param window_minimizers_representation output array of representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_windows_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially number of front end and central minimizers)
__global__ void find_back_end_minimizers(const std::uint64_t minimizer_size,
const std::uint64_t window_size,
const char* const basepairs,
const ArrayBlock* const read_id_to_basepairs_section,
representation_t* const window_minimizers_representation,
char* const window_minimizers_direction,
position_in_read_t* const window_minimizers_position_in_read,
const ArrayBlock* const read_id_to_windows_section,
std::uint32_t* const read_id_to_minimizers_written,
const bool hash_representations)
{
// See find_front_end_minimizers for more details about the algorithm
if (1 == window_size)
{
// if 1 == window_size there are no end minimizers
return;
}
// Index of the first basepair which belongs to the largest back end minimizer window. Index of that basepair within the read
const auto index_of_first_element_to_process_local = read_id_to_basepairs_section[blockIdx.x].block_size_ - (window_size - 1 + minimizer_size - 1);
// Index of the first basepair which belongs to the largest back end minimizer window. Index of that basepair within the whole array of basepairs for all reads
const auto index_of_first_element_to_process_global = read_id_to_basepairs_section[blockIdx.x].first_element_ + index_of_first_element_to_process_local;
// Index of the element to which the first back end minimizer of this read should be written. Index refers to the position within the section dedicated to this read
const auto output_index_to_write_the_first_minimizer_local = read_id_to_minimizers_written[blockIdx.x];
// Index of the element to which the first back end minimizer of this read should be written. Index refers to the position within the whole array dedicated to all reads
const auto output_index_to_write_the_first_minimizer_global = read_id_to_windows_section[blockIdx.x].first_element_ + output_index_to_write_the_first_minimizer_local;
// Dynamically allocating shared memory and assigning parts of it to different pointers
// Everything is 8-byte aligned
extern __shared__ std::uint64_t sm[];
// TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
// TODO: use sizeof to get the number of bytes
std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_to_process elements
shared_memory_64_bit_elements_already_taken += (window_size - 1 + minimizer_size - 1 + 7) / 8;
char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_to_process elements
shared_memory_64_bit_elements_already_taken += (window_size - 1 + minimizer_size - 1 + 7) / 8;
representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_to_process elements
shared_memory_64_bit_elements_already_taken += window_size - 1;
char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_to_process elements; 0 - forward, 1 - reverse
shared_memory_64_bit_elements_already_taken += (window_size - 1 + 7) / 8;
position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_to_process elements
shared_memory_64_bit_elements_already_taken += (window_size - 1 + 1) / 2;
position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_to_process elements; 0 - same, 1 - different
shared_memory_64_bit_elements_already_taken += (window_size - 1 + 1) / 2;
// TODO: Move to constant memory
char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements
if (0 == threadIdx.x)
{
forward_to_reverse_complement[0b000] = 0b0000;
forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
forward_to_reverse_complement[0b010] = 0b0000;
forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
forward_to_reverse_complement[0b101] = 0b0000;
forward_to_reverse_complement[0b110] = 0b0000;
forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
}
__syncthreads();
// There are only window_size-1 back end windows. window_size usually has a value of a few dozen
// Having window_size so large that it does not fit in shared memory is unlikely
// If that happens implement this method similarly to find_central_minimizers
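// e.g. for window_size = 64 and minimizer_size = 15 the arrays below add up to roughly 1.3 KB of shared memory, well within the limit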
// load basepairs into shared memory and calculate the lexical ordering hash
for (std::uint16_t basepair_index = threadIdx.x; basepair_index < window_size - 1 + minimizer_size - 1; basepair_index += blockDim.x)
{
const char bp = basepairs[index_of_first_element_to_process_global + basepair_index];
forward_basepair_hashes[basepair_index] = 0b11 & (bp >> 2 ^ bp >> 1);
reverse_basepair_hashes[basepair_index] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
}
__syncthreads();
// calculate kmer minimizers
// For back end minimizers the number of kmers is the same as the number of windows
for (std::uint16_t kmer_index = threadIdx.x; kmer_index < window_size - 1; kmer_index += blockDim.x)
{
representation_t forward_representation = 0;
representation_t reverse_representation = 0;
// TODO: It's not necessary to fully build both representations in order to determine which one is smaller. In most cases there is going to be a difference already at the first element
for (std::uint16_t i = 0; i < minimizer_size; ++i)
{
forward_representation |= forward_basepair_hashes[kmer_index + i] << 2 * (minimizer_size - i - 1);
reverse_representation |= reverse_basepair_hashes[kmer_index + i] << 2 * i;
}
if (hash_representations)
{
forward_representation = wang_hash64(forward_representation);
reverse_representation = wang_hash64(reverse_representation);
}
if (forward_representation <= reverse_representation)
{
minimizers_representation[kmer_index] = forward_representation;
minimizers_direction[kmer_index] = 0;
}
else
{
minimizers_representation[kmer_index] = reverse_representation;
minimizers_direction[kmer_index] = 1;
}
}
__syncthreads();
// find window minimizer
for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
{
// assume that the first kmer in the window is the minimizer
representation_t window_minimizer_representation = minimizers_representation[window_index];
position_in_read_t window_minimizer_position_in_read = index_of_first_element_to_process_local + window_index;
// now check the other kmers in the window (note that these are the back end minimizers, so not all windows have the same length)
for (std::uint16_t i = 1; window_index + i < window_size - 1; ++i)
{
if (minimizers_representation[window_index + i] <= window_minimizer_representation)
{
window_minimizer_representation = minimizers_representation[window_index + i];
window_minimizer_position_in_read = index_of_first_element_to_process_local + window_index + i;
}
}
minimizers_position_in_read[window_index] = window_minimizer_position_in_read;
}
__syncthreads();
// check if the window to the left has the same minimizer
for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
{
representation_t neighbors_minimizers_position_in_read = 0;
// find left neighbor's window minimizer's position in read
if (0 == window_index)
{
// if this is the first window take the position of the minimizer of the last central minimizer
neighbors_minimizers_position_in_read = window_minimizers_position_in_read[output_index_to_write_the_first_minimizer_global - 1];
}
else
{
// TODO: consider using warp shuffle instead of shared memory
neighbors_minimizers_position_in_read = minimizers_position_in_read[window_index - 1];
}
// check if it's the same minimizer
if (neighbors_minimizers_position_in_read == minimizers_position_in_read[window_index])
{
different_minimizer_than_neighbors[window_index] = 0;
}
else
{
different_minimizer_than_neighbors[window_index] = 1;
}
}
__syncthreads();
// perform inclusive scan
// different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
// TODO: implement it using warp shuffle or use CUB
if (0 == threadIdx.x)
{
// read_id_to_minimizers_written[blockIdx.x] is the index of the last written plus one
different_minimizer_than_neighbors[0] += output_index_to_write_the_first_minimizer_local;
for (std::uint16_t i = 1; i < window_size - 1; ++i)
{
different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
}
}
__syncthreads();
// now save minimizers to output array
for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
{
// different_minimizer_than_neighbors contains an inclusive scan, i.e. its value is index_to_write_to + 1
const std::uint32_t neighbors_write_index = 0 == window_index ? output_index_to_write_the_first_minimizer_local : different_minimizer_than_neighbors[window_index - 1];
if (neighbors_write_index < different_minimizer_than_neighbors[window_index])
{
// subtract 1 from different_minimizer_than_neighbors to get the actual index to write to
const auto output_index = read_id_to_windows_section[blockIdx.x].first_element_ + different_minimizer_than_neighbors[window_index] - 1;
// subtract index_of_first_element_to_process_local to get the index in shared memory
window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[window_index] - index_of_first_element_to_process_local];
window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[window_index] - index_of_first_element_to_process_local];
window_minimizers_position_in_read[output_index] = minimizers_position_in_read[window_index];
}
}
__syncthreads();
// save the write index of the last written minimizer
if (0 == threadIdx.x)
{
read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[window_size - 1 - 1];
}
}
/// \brief packs minimizers of different reads together
///
/// window_minimizers_representation, window_minimizers_position_in_read and window_minimizers_direction all allocate one element for each window in the read.
/// Many windows share the same minimizer and each minimizer is written only once, meaning many elements do not contain minimizers.
/// This function creates new arrays where such elements do not exist.
/// Note that in the input arrays all minimizers of one read are written consecutively, i.e. [read 0 minimizers], [read 0 junk], [read 1 minimizers], [read 1 junk], [read 2 minimizers]...
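/// For example, if read 0 has 2 and read 1 has 3 minimizers, with 4 window elements allocated per read, the layout would be:
/// input: m00 m01 junk junk | m10 m11 m12 junk -> output: m00 m01 m10 m11 m12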
///
/// \param window_minimizers_representation array of representations of minimizers, grouped by reads
/// \param window_minimizers_position_in_read array of positions in read of minimizers, grouped by reads
/// \param window_minimizers_direction array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param read_id_to_windows_section index of first element dedicated to that read in input arrays and the number of dedicated elements
/// \param representations_compressed array of representations of minimizers, grouped by reads, without invalid elements between the reads
/// \param rest_compressed array of read_ids, positions_in_read and directions of minimizers, grouped by reads, without invalid elements between the reads
/// \param read_id_to_compressed_minimizers index of the first element dedicated to that read in the output arrays and the number of dedicated elements
__global__ void compress_minimizers(const representation_t* const window_minimizers_representation,
const position_in_read_t* const window_minimizers_position_in_read,
const char* const window_minimizers_direction,
const ArrayBlock* const read_id_to_windows_section,
representation_t* const representations_compressed,
Minimizer::ReadidPositionDirection* const rest_compressed,
const ArrayBlock* const read_id_to_compressed_minimizers,
std::uint32_t offset)
{
const auto& first_input_minimizer = read_id_to_windows_section[blockIdx.x].first_element_;
const auto& first_output_minimizer = read_id_to_compressed_minimizers[blockIdx.x].first_element_;
const auto& number_of_minimizers = read_id_to_compressed_minimizers[blockIdx.x].block_size_;
for (std::uint32_t i = threadIdx.x; i < number_of_minimizers; i += blockDim.x)
{
representations_compressed[first_output_minimizer + i] = window_minimizers_representation[first_input_minimizer + i];
rest_compressed[first_output_minimizer + i].read_id_ = blockIdx.x + offset;
rest_compressed[first_output_minimizer + i].position_in_read_ = window_minimizers_position_in_read[first_input_minimizer + i];
rest_compressed[first_output_minimizer + i].direction_ = window_minimizers_direction[first_input_minimizer + i];
}
}
Minimizer::GeneratedSketchElements Minimizer::generate_sketch_elements(std::shared_ptr<DeviceAllocator> allocator, const std::uint64_t number_of_reads_to_add,
const std::uint64_t minimizer_size,
const std::uint64_t window_size,
const std::uint64_t read_id_of_first_read,
const device_buffer<char>& merged_basepairs_d,
const std::vector<ArrayBlock>& read_id_to_basepairs_section_h,
const device_buffer<ArrayBlock>& read_id_to_basepairs_section_d,
const bool hash_representations)
{
// for each read find the maximum number of minimizers (one per window), determine their section in the minimizer arrays and allocate the arrays
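// e.g. for minimizer_size = 3 and window_size = 4 a read of 12 basepairs has 12 - (3 - 1) = 10 kmers and
// (4 - 1) front end + (10 - (4 - 1)) central + (4 - 1) back end = 3 + 7 + 3 = 13 windows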
std::uint64_t total_windows = 0;
std::vector<ArrayBlock> read_id_to_windows_section_h(number_of_reads_to_add, {0, 0});
for (read_id_t read_id = 0; read_id < number_of_reads_to_add; ++read_id)
{
read_id_to_windows_section_h[read_id].first_element_ = total_windows;
std::uint32_t windows = window_size - 1; // front end minimizers
windows += read_id_to_basepairs_section_h[read_id].block_size_ - (minimizer_size + window_size - 1) + 1; // central minimizers
windows += window_size - 1; // back end minimizers
read_id_to_windows_section_h[read_id].block_size_ = windows;
total_windows += windows;
}
CGA_LOG_INFO("Allocating {} bytes for read_id_to_windows_section_d", read_id_to_windows_section_h.size() * sizeof(decltype(read_id_to_windows_section_h)::value_type));
device_buffer<decltype(read_id_to_windows_section_h)::value_type> read_id_to_windows_section_d(read_id_to_windows_section_h.size(), allocator);
CGA_CU_CHECK_ERR(hipMemcpy(read_id_to_windows_section_d.data(),
read_id_to_windows_section_h.data(),
read_id_to_windows_section_h.size() * sizeof(decltype(read_id_to_windows_section_h)::value_type),
hipMemcpyHostToDevice));
CGA_LOG_INFO("Allocating {} bytes for window_minimizers_representation_d", total_windows * sizeof(representation_t));
device_buffer<representation_t> window_minimizers_representation_d(total_windows, allocator);
CGA_LOG_INFO("Allocating {} bytes for window_minimizers_direction_d", total_windows * sizeof(char));
device_buffer<char> window_minimizers_direction_d(total_windows, allocator);
CGA_LOG_INFO("Allocating {} bytes for window_minimizers_position_in_read_d", total_windows * sizeof(position_in_read_t));
device_buffer<position_in_read_t> window_minimizers_position_in_read_d(total_windows, allocator);
CGA_LOG_INFO("Allocating {} bytes for read_id_to_minimizers_written_d", number_of_reads_to_add * sizeof(std::uint32_t));
device_buffer<std::uint32_t> read_id_to_minimizers_written_d(number_of_reads_to_add, allocator);
// initially there are no minimizers written to the output arrays
CGA_CU_CHECK_ERR(hipMemset(read_id_to_minimizers_written_d.data(), 0, number_of_reads_to_add * sizeof(std::uint32_t)));
// *** front end minimizers ***
std::uint32_t num_of_basepairs_for_front_minimizers = (window_size - 1) + minimizer_size - 1;
std::uint32_t num_of_threads = ::min(num_of_basepairs_for_front_minimizers, 64u);
// largest window in end minimizers has the size of window_size-1, meaning it covers window_size-1 + minimizer_size - 1 basepairs
const std::uint32_t basepairs_for_end_minimizers = (window_size - 1 + minimizer_size - 1);
const std::uint32_t kmers_for_end_minimizers = window_size - 1; // for end minimizers the number of kmers is the same as the number of windows because the last window has only one kmer
const std::uint32_t windows_for_end_minimizers = window_size - 1;
// determine the total amount of shared memory needed (see kernel for clarification)
// shared memory is aligned to 8 bytes, so for 1-byte variables (x+7)/8 values are allocated (for 10 1-byte elements (10+7)/8 = 17/8 = 2 8-byte elements are allocated, instead of 10/8 = 1 which would be wrong)
// the final number of allocated 8-byte values is multiplied by 8 at the end in order to get the number of bytes needed
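// e.g. for minimizer_size = 15 and window_size = 16: basepairs_for_end_minimizers = 29 and kmers/windows_for_end_minimizers = 15,
// so the sum below is 4 + 4 + 15 + 2 + 8 + 8 + 1 + 1 + 1 + 1 = 45 8-byte elements, i.e. 360 bytes of shared memory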
std::uint32_t shared_memory_for_kernel = 0;
shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // forward basepairs (char)
shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // reverse basepairs (char)
shared_memory_for_kernel += (kmers_for_end_minimizers); // representations of minimizers (representation_t)
shared_memory_for_kernel += (windows_for_end_minimizers + 7) / 8; // directions of representations of minimizers (char)
shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // position_in_read of minimizers (position_in_read_t)
shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // does the window have a different minimizer than its left neighbor (position_in_read_t)
shared_memory_for_kernel += 1; // representation from previous step (representation_t)
shared_memory_for_kernel += (1 + 1) / 2; // position from previous step (position_in_read_t)
shared_memory_for_kernel += (1 + 1) / 2; // inclusive sum from previous step (position_in_read_t)
shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char)
shared_memory_for_kernel *= 8; // up to here the value is the number of 8-byte elements, now convert it to the number of bytes
CGA_LOG_INFO("Launching find_front_end_minimizers with {} bytes of shared memory", shared_memory_for_kernel);
hipLaunchKernelGGL(( find_front_end_minimizers), dim3(number_of_reads_to_add), dim3(num_of_threads), shared_memory_for_kernel, 0, minimizer_size,
window_size,
merged_basepairs_d.data(),
read_id_to_basepairs_section_d.data(),
window_minimizers_representation_d.data(),
window_minimizers_direction_d.data(),
window_minimizers_position_in_read_d.data(),
read_id_to_windows_section_d.data(),
read_id_to_minimizers_written_d.data(),
hash_representations);
CGA_CU_CHECK_ERR(hipStreamSynchronize(0));
// *** central minimizers ***
const std::uint32_t basepairs_per_thread = 8; // arbitrary, tradeoff between the number of thread blocks that can be scheduled simultaneously and the number of basepairs which have to be loaded multiple times because only basepairs_per_thread*num_of_threads - (window_size + minimizer_size - 1) + 1 windows can be processed at once, i.e. window_size + minimizer_size - 2 basepairs have to be loaded again
num_of_threads = 64; // arbitrary
const std::uint32_t basepairs_in_loop_step = num_of_threads * basepairs_per_thread;
const std::uint32_t minimizers_in_loop_step = basepairs_in_loop_step - minimizer_size + 1;
const std::uint32_t windows_in_loop_step = minimizers_in_loop_step - window_size + 1;
shared_memory_for_kernel = 0;
shared_memory_for_kernel += (basepairs_in_loop_step + 7) / 8; // forward basepairs (char)
shared_memory_for_kernel += (basepairs_in_loop_step + 7) / 8; // reverse basepairs (char)
shared_memory_for_kernel += minimizers_in_loop_step; // representations of minimizers (representation_t)
shared_memory_for_kernel += (windows_in_loop_step + 7) / 8; // directions of representations of minimizers (char)
shared_memory_for_kernel += (windows_in_loop_step + 1) / 2; // position_in_read of minimizers (position_in_read_t)
shared_memory_for_kernel += (windows_in_loop_step + 1) / 2; // does the window have a different minimizer than its left neighbor
shared_memory_for_kernel += (1 + 1) / 2; // position from previous step (position_in_read_t)
shared_memory_for_kernel += (1 + 1) / 2; // inclusive sum from previous step (position_in_read_t)
shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char)
shared_memory_for_kernel *= 8; // up to here the value is the number of 8-byte elements, now convert it to the number of bytes
CGA_LOG_INFO("Launching find_central_minimizers with {} bytes of shared memory", shared_memory_for_kernel);
hipLaunchKernelGGL(( find_central_minimizers), dim3(number_of_reads_to_add), dim3(num_of_threads), shared_memory_for_kernel, 0, minimizer_size,
window_size,
basepairs_per_thread,
merged_basepairs_d.data(),
read_id_to_basepairs_section_d.data(),
window_minimizers_representation_d.data(),
window_minimizers_direction_d.data(),
window_minimizers_position_in_read_d.data(),
read_id_to_windows_section_d.data(),
read_id_to_minimizers_written_d.data(),
hash_representations);
CGA_CU_CHECK_ERR(hipStreamSynchronize(0));
// *** back end minimizers ***
num_of_threads = 64;
// largest window should fit shared memory
shared_memory_for_kernel = 0;
shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // forward basepairs (char)
shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // reverse basepairs (char)
shared_memory_for_kernel += kmers_for_end_minimizers; // representations of minimizers (representation_t)
shared_memory_for_kernel += (kmers_for_end_minimizers + 7) / 8; // directions of representations of minimizers (char)
shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // position_in_read of minimizers (position_in_read_t)
shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // does the window have a different minimizer than its left neighbor
shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char)
shared_memory_for_kernel *= 8; // up to here the value is the number of 8-byte elements, now convert it to the number of bytes
CGA_LOG_INFO("Launching find_back_end_minimizers with {} bytes of shared memory", shared_memory_for_kernel);
hipLaunchKernelGGL(( find_back_end_minimizers), dim3(number_of_reads_to_add), dim3(num_of_threads), shared_memory_for_kernel, 0, minimizer_size,
window_size,
merged_basepairs_d.data(),
read_id_to_basepairs_section_d.data(),
window_minimizers_representation_d.data(),
window_minimizers_direction_d.data(),
window_minimizers_position_in_read_d.data(),
read_id_to_windows_section_d.data(),
read_id_to_minimizers_written_d.data(),
hash_representations);
CGA_CU_CHECK_ERR(hipStreamSynchronize(0));
std::vector<std::uint32_t> read_id_to_minimizers_written_h(number_of_reads_to_add);
CGA_CU_CHECK_ERR(hipMemcpy(read_id_to_minimizers_written_h.data(),
read_id_to_minimizers_written_d.data(),
read_id_to_minimizers_written_h.size() * sizeof(decltype(read_id_to_minimizers_written_h)::value_type),
hipMemcpyDeviceToHost));
CGA_LOG_INFO("Deallocating {} bytes from read_id_to_minimizers_written_d", read_id_to_minimizers_written_d.size() * sizeof(decltype(read_id_to_minimizers_written_d)::value_type));
read_id_to_minimizers_written_d.free();
// *** remove unused elements from the window minimizers arrays ***
// In window_minimizers_representation_d and other arrays enough space was allocated to support cases where each window has a different minimizer. In reality many neighboring windows share the same minimizer
// As a result there are areas of meaningless data between minimizers belonging to different reads (space_allocated_for_all_possible_minimizers_of_a_read - space_needed_for_the_actual_minimizers)
// At this point all minimizers are put together (compressed) so that the last minimizer of one read is next to the first minimizer of another read
// Data is organized in two arrays in order to support usage of thrust::stable_sort_by_key. One contains representations (key) and the other the rest (values)
std::vector<ArrayBlock> read_id_to_compressed_minimizers_h(number_of_reads_to_add, {0, 0});
std::uint64_t total_minimizers = 0;
for (std::size_t read_id = 0; read_id < read_id_to_minimizers_written_h.size(); ++read_id)
{
read_id_to_compressed_minimizers_h[read_id].first_element_ = total_minimizers;
read_id_to_compressed_minimizers_h[read_id].block_size_ = read_id_to_minimizers_written_h[read_id];
total_minimizers += read_id_to_minimizers_written_h[read_id];
}
CGA_LOG_INFO("Allocating {} bytes for read_id_to_compressed_minimizers_d", read_id_to_compressed_minimizers_h.size() * sizeof(decltype(read_id_to_compressed_minimizers_h)::value_type));
device_buffer<decltype(read_id_to_compressed_minimizers_h)::value_type> read_id_to_compressed_minimizers_d(read_id_to_compressed_minimizers_h.size(), allocator);
CGA_CU_CHECK_ERR(hipMemcpy(read_id_to_compressed_minimizers_d.data(),
read_id_to_compressed_minimizers_h.data(),
read_id_to_compressed_minimizers_h.size() * sizeof(decltype(read_id_to_compressed_minimizers_h)::value_type),
hipMemcpyHostToDevice));
CGA_LOG_INFO("Allocating {} bytes for representations_compressed_d", total_minimizers * sizeof(representation_t));
device_buffer<representation_t> representations_compressed_d(total_minimizers, allocator);
// rest = position_in_read, direction and read_id
CGA_LOG_INFO("Allocating {} bytes for rest_compressed_d", total_minimizers * sizeof(ReadidPositionDirection));
device_buffer<ReadidPositionDirection> rest_compressed_d(total_minimizers, allocator);
CGA_LOG_INFO("Launching compress_minimizers with {} bytes of shared memory", 0);
hipLaunchKernelGGL(( compress_minimizers), dim3(number_of_reads_to_add), dim3(128), 0, 0, window_minimizers_representation_d.data(),
window_minimizers_position_in_read_d.data(),
window_minimizers_direction_d.data(),
read_id_to_windows_section_d.data(),
representations_compressed_d.data(),
rest_compressed_d.data(),
read_id_to_compressed_minimizers_d.data(),
read_id_of_first_read);
CGA_CU_CHECK_ERR(hipStreamSynchronize(0));
// free these arrays as they are not needed anymore
CGA_LOG_INFO("Deallocating {} bytes from window_minimizers_representation_d", window_minimizers_representation_d.size() * sizeof(decltype(window_minimizers_representation_d)::value_type));
window_minimizers_representation_d.free();
CGA_LOG_INFO("Deallocating {} bytes from window_minimizers_direction_d", window_minimizers_direction_d.size() * sizeof(decltype(window_minimizers_direction_d)::value_type));
window_minimizers_direction_d.free();
CGA_LOG_INFO("Deallocating {} bytes from window_minimizers_position_in_read_d", window_minimizers_position_in_read_d.size() * sizeof(decltype(window_minimizers_position_in_read_d)::value_type));
window_minimizers_position_in_read_d.free();
CGA_LOG_INFO("Deallocating {} bytes from read_id_to_compressed_minimizers_d", read_id_to_compressed_minimizers_d.size() * sizeof(decltype(read_id_to_compressed_minimizers_d)::value_type));
read_id_to_compressed_minimizers_d.free();
CGA_LOG_INFO("Deallocating {} bytes from read_id_to_windows_section_d", read_id_to_windows_section_d.size() * sizeof(decltype(read_id_to_windows_section_d)::value_type));
read_id_to_windows_section_d.free();
return {std::move(representations_compressed_d),
std::move(rest_compressed_d)};
}
} // namespace cudamapper
} // namespace claragenomics
| 3d707a65754b7afbbca5a7079bb42bbfe90b75af.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include <limits>
#include "minimizer.hpp"
namespace claragenomics
{
namespace cudamapper
{
Minimizer::Minimizer(representation_t representation, position_in_read_t position_in_read, DirectionOfRepresentation direction, read_id_t read_id)
: representation_(representation)
, position_in_read_(position_in_read)
, direction_(direction)
, read_id_(read_id)
{
}
representation_t Minimizer::representation() const
{
return representation_;
}
position_in_read_t Minimizer::position_in_read() const
{
return position_in_read_;
}
read_id_t Minimizer::read_id() const
{
return read_id_;
}
/// \brief Apply a hash function to a representation
///
/// Because of the non-Poisson distribution of DNA, some common sequences with common kmer-content (e.g. long poly-A runs)
/// may be over-represented in sketches. By applying a hash function, kmers are mapped to representations over
/// a more uniform space. The hash function implemented here was developed by Thomas Wang and is described
/// [here](https://gist.github.com/badboy/6267743). A mask is applied to the output so that all representations are mapped
/// to a 32 bit space.
///
/// \param key the input representation
__device__ representation_t wang_hash64(representation_t key)
{
uint64_t mask = (uint64_t(1) << 32) - 1;
key = (~key + (key << 21)) & mask;
key = key ^ key >> 24;
key = ((key + (key << 3)) + (key << 8)) & mask;
key = key ^ key >> 14;
key = ((key + (key << 2)) + (key << 4)) & mask;
key = key ^ key >> 28;
key = (key + (key << 31)) & mask;
return key;
}
Minimizer::DirectionOfRepresentation Minimizer::direction() const
{
return direction_;
}
/// \brief finds front end minimizers
///
/// Finds the minimizers of windows starting at position 0 and having window sizes ranging from 1 to window_size-1
///
/// \param minimizer_size kmer length
/// \param window_size number of kmers in one central minimizer window, kmers being shifted by one basepair each (for front end minimizers window size actually varies from 1 to window_size-1)
/// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on
/// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read
/// \param window_minimizers_representation output array of representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_windows_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially zero)
__global__ void find_front_end_minimizers(const std::uint64_t minimizer_size,
const std::uint64_t window_size,
const char* const basepairs,
const ArrayBlock* const read_id_to_basepairs_section,
representation_t* const window_minimizers_representation,
char* const window_minimizers_direction,
position_in_read_t* const window_minimizers_position_in_read,
const ArrayBlock* const read_id_to_windows_section,
std::uint32_t* const read_id_to_minimizers_written,
const bool hash_representations)
{
// TODO: simplify this method similarly to find_back_end_minimizers
if (1 == window_size)
{
// if 1 == window_size there are no end minimizers
return;
}
const auto input_array_first_element = read_id_to_basepairs_section[blockIdx.x].first_element_;
const auto output_arrays_offset = read_id_to_windows_section[blockIdx.x].first_element_;
// Dynamically allocating shared memory and assigning parts of it to different pointers
// Everything is 8-byte aligned
extern __shared__ std::uint64_t sm[];
// TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
// TODO: use sizeof to get the number of bytes
std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x elements
shared_memory_64_bit_elements_already_taken += (blockDim.x + 7) / 8;
char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x elements
shared_memory_64_bit_elements_already_taken += (blockDim.x + 7) / 8;
representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements
shared_memory_64_bit_elements_already_taken += blockDim.x - (minimizer_size - 1);
char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements; 0 - forward, 1 - reverse
shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 7) / 8;
position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements
shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 1) / 2;
position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // blockDim.x - (minimizer_size - 1) elements; 0 - same, 1 - different
shared_memory_64_bit_elements_already_taken += (blockDim.x - (minimizer_size - 1) + 1) / 2;
representation_t* minimizer_representation_of_largest_window_from_previous_step = (&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += 1;
position_in_read_t* minimizer_position_in_read_of_largest_window_from_previous_step = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
// local index = index in section of the output array dedicated to this read
position_in_read_t* local_index_to_write_next_minimizer_to = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
// TODO: Move to constant memory
char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements
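// The table below is indexed by the three lowest bits of the ASCII basepair ('A' & 0b111 = 1, 'C' & 0b111 = 3, 'G' & 0b111 = 7, 'T' & 0b111 = 4)
// and stores a character whose bits hash to the 2-bit code of the complementary basepair when passed through the same
// "bp >> 2 ^ bp >> 1" trick used for the forward strand (this assumes the reads contain only upper-case A, C, G and T)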
if (0 == threadIdx.x)
{
forward_to_reverse_complement[0b000] = 0b0000;
forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
forward_to_reverse_complement[0b010] = 0b0000;
forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
forward_to_reverse_complement[0b101] = 0b0000;
forward_to_reverse_complement[0b110] = 0b0000;
forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
}
__syncthreads();
// Each thread loads one basepair, making it blockDim.x basepairs. Each kmer has minimizer_size elements
// Number of windows is equal to the number of kmers for end minimizers
// This means a total of blockDim.x - (minimizer_size - 1) kmers can be processed in one block, where each kmer is shifted by one basepair compared to the previous kmer
// For blockDim.x = 6 and minimizer_size = 3 there are 6 - (3 - 1) = 4 kmers
// 0 1 2
// 1 2 3
// 2 3 4
// 3 4 5
// If more minimizers have to be processed a new step is needed, in this case meaning
// 4 5 6
// 5 6 7
// 6 7 8
// 7 8 9
// This means that a number of basepairs is loaded twice, but this is a tradeoff for less complex code
const std::uint16_t windows_per_loop_step = blockDim.x - (minimizer_size - 1);
*minimizer_representation_of_largest_window_from_previous_step = 0;
*minimizer_position_in_read_of_largest_window_from_previous_step = 0;
*local_index_to_write_next_minimizer_to = 0;
for (std::uint32_t first_element_in_step = 0; first_element_in_step < window_size - 1; first_element_in_step += windows_per_loop_step)
{
// load basepairs into shared memory and calculate the lexical ordering hash
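// "0b11 & (bp >> 2 ^ bp >> 1)" maps upper-case ASCII basepairs to 2-bit codes: 'A' -> 0b00, 'C' -> 0b01, 'G' -> 0b10, 'T' -> 0b11
// (assuming the input contains only A, C, G and T; any other character maps to an arbitrary 2-bit value)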
if (first_element_in_step + threadIdx.x < window_size - 1 + minimizer_size - 1)
{ // window_size - 1 + minimizer_size - 1 -> total number of basepairs needed for all front minimizers
const char bp = basepairs[input_array_first_element + first_element_in_step + threadIdx.x];
forward_basepair_hashes[threadIdx.x] = 0b11 & (bp >> 2 ^ bp >> 1);
reverse_basepair_hashes[threadIdx.x] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
}
__syncthreads();
// The first front end window covers only one minimizer (the one starting at position 0), the second covers minimizers starting at 0 and 1, and so on until the window which covers window_size-1 minimizers
// For window_size = 7 and minimize_size = 3 this means:
// window 0: 0 1 2 (0 1 2)
// window 1: 0 1 2 3 (0 1 2; 1 2 3)
// widnow 2: 0 1 2 3 4 (0 1 2; 1 2 3; 2 3 4)
// window 3: 0 1 2 3 4 5 (0 1 2; 1 2 3; 2 3 4; 3 4 5)
// window 4: 0 1 2 3 4 5 6 (0 1 2; 1 2 3; 2 3 4; 3 4 5; 4 5 6)
// window 5: 0 1 2 3 4 5 6 7 (0 1 2; 1 2 3; 2 3 4; 3 4 5; 4 5 6; 5 6 7)
// If window_size > windows_per_loop_step the other windows have to be processed in other loop steps
// For example, for blockDim.x = 6, minimizer_size = 3 (=> windows_per_loop_step = 4) and window_size = 7:
// step 0 (first_element_in_step = 0):
// window 0: 0 1 2 (0 1 2)
// window 1: 0 1 2 3 (0 1 2; 1 2 3)
// window 2: 0 1 2 3 4 (0 1 2; 1 2 3; 2 3 4)
// window 3: 0 1 2 3 4 5 (0 1 2; 1 2 3; 2 3 4; 3 4 5)
// step 1 (first_element_in_step = 4):
// window 4: 0 1 2 3 4 5 6 (take results for window 3 and add: 4 5 6)
// window 5: 0 1 2 3 4 5 6 7 (take results for window 3 and add: 4 5 6; 5 6 7)
// This means that a thread has a window assigned to it when threadIdx.x < windows_per_loop_step (for all loops other than the last one) and
// when first_element_in_step + threadIdx.x < window_size - 1
const bool thread_assigned_to_a_window = first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step;
// calculate minimizer for each kmer in front end windows
if (thread_assigned_to_a_window)
{ // largest front minimizer window starts at basepair 0 and goes up to window_size - 1
representation_t forward_representation = 0;
representation_t reverse_representation = 0;
// TODO: It's not necessary to fully build both representations in order to determine which one is smaller. In most cases there is going to be a difference already at the first element
for (std::uint16_t i = 0; i < minimizer_size; ++i)
{
forward_representation |= forward_basepair_hashes[threadIdx.x + i] << 2 * (minimizer_size - i - 1);
reverse_representation |= reverse_basepair_hashes[threadIdx.x + i] << 2 * i;
}
if (hash_representations)
{
forward_representation = wang_hash64(forward_representation);
reverse_representation = wang_hash64(reverse_representation);
}
if (forward_representation <= reverse_representation)
{
minimizers_representation[threadIdx.x] = forward_representation;
minimizers_direction[threadIdx.x] = 0;
}
else
{
minimizers_representation[threadIdx.x] = reverse_representation;
minimizers_direction[threadIdx.x] = 1;
}
}
__syncthreads();
representation_t window_minimizer_representation = 0;
position_in_read_t window_minimizer_position_in_read = 0;
// calculate minimizer for each window
// Start with the value of the first minimizer and iteratively compare it with the other minimizers in the window
// If first_element_in_step != 0 there is no need to go through all minimizers in the window. One can take the minimizer of window first_element_in_step-1
// as the current window would check exactly the same minimizers before checking minimizer first_element_in_step
if (thread_assigned_to_a_window)
{
if (first_element_in_step != 0)
{
window_minimizer_representation = *minimizer_representation_of_largest_window_from_previous_step;
window_minimizer_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step;
if (minimizers_representation[0] <= window_minimizer_representation)
{
window_minimizer_representation = minimizers_representation[0];
window_minimizer_position_in_read = first_element_in_step;
}
}
else
{
window_minimizer_representation = minimizers_representation[0];
window_minimizer_position_in_read = 0;
}
// All threads have to wait for the largest block to finish. Probably no better solution without big restructuring
// If there are several minimizers with the same representation only save the latest one (thus <=), others will be covered by smaller windows
for (std::uint16_t i = 1; i <= threadIdx.x; ++i)
{
if (minimizers_representation[i] <= window_minimizer_representation)
{
window_minimizer_representation = minimizers_representation[i];
window_minimizer_position_in_read = first_element_in_step + i;
}
}
minimizers_position_in_read[threadIdx.x] = window_minimizer_position_in_read;
}
__syncthreads();
// only write first occurence of each minimizer to the output array
// Hash of the last kmer in a window can be a minimizer only if it is smaller than or equal to the minimizer of the previous window
// That means that the minimizer of the current window should only be written if it is different from the one of the previous window
// Otherwise it is the same minimizer and there is no need to write to the output array
// Imagine that the hash representations of the windows are as follows (the number in parentheses marks the position of the last occurrence of the minimizer with that representation):
// 8, 89, 898, 8987, 89878, 898785, 8987856, 89878562
// Minimizers of these windows are
// 8(0) 8(0) 8(2) 7(3) 7(3) 5(5) 5(5) 2(7)
// If we use 1 to indicate the first occurrence of a minimizer and 0 for a repetition we get
// 1 0 1 1 0 1 0 1
// If we do an inclusive scan on this array we get the indices to which the unique minimizers should be written (plus one)
// 1 1 2 3 3 4 4 5
// From this it's clear that only the windows whose value is larger than that of their left neighbor should write their minimizer, and that they should write to the element with index value-1
if (first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step)
{
if (0 == first_element_in_step && 0 == threadIdx.x)
{
// minimizer of first window is unique for sure as it has no left neighbor
different_minimizer_than_neighbors[0] = 1;
}
else
{
representation_t neighbors_minimizers_position_in_read = 0;
// find left neighbor's window minimizer's position in read
if (0 == threadIdx.x)
{
neighbors_minimizers_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step;
}
else
{
// TODO: consider using warp shuffle instead of shared memory
neighbors_minimizers_position_in_read = minimizers_position_in_read[threadIdx.x - 1];
}
// check if it's the same minimizer
if (neighbors_minimizers_position_in_read == minimizers_position_in_read[threadIdx.x])
{
different_minimizer_than_neighbors[threadIdx.x] = 0;
}
else
{
different_minimizer_than_neighbors[threadIdx.x] = 1;
}
}
}
__syncthreads();
// if there are more loop steps to follow write the value and position of minimizer of the largest window
if (first_element_in_step + windows_per_loop_step < window_size - 1 && threadIdx.x == windows_per_loop_step - 1)
{
*minimizer_representation_of_largest_window_from_previous_step = window_minimizer_representation;
*minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizer_position_in_read;
}
// no need to sync, these two values are not used before the next sync
// perform inclusive scan
// different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
// TODO: implement it using warp shuffle or use CUB
if (0 == threadIdx.x)
{
std::uint16_t i = 0;
different_minimizer_than_neighbors[i] += *local_index_to_write_next_minimizer_to;
for (i = 1; i < blockDim.x - (minimizer_size - 1); ++i)
{
different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
}
}
__syncthreads();
// now save minimizers to output array
if (first_element_in_step + threadIdx.x < window_size - 1 && threadIdx.x < windows_per_loop_step)
{
const std::uint32_t neighbors_write_index = 0 == threadIdx.x ? *local_index_to_write_next_minimizer_to : different_minimizer_than_neighbors[threadIdx.x - 1];
if (neighbors_write_index < different_minimizer_than_neighbors[threadIdx.x])
{
const std::uint64_t output_index = output_arrays_offset + different_minimizer_than_neighbors[threadIdx.x] - 1;
window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[threadIdx.x] - first_element_in_step];
window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[threadIdx.x] - first_element_in_step];
window_minimizers_position_in_read[output_index] = minimizers_position_in_read[threadIdx.x];
}
}
__syncthreads();
// index (plus one) to which the last window minimizer was written is the number of all unique front end window minimizers
if (first_element_in_step + threadIdx.x == window_size - 1 - 1)
{
// "plus one" is already included in different_minimizer_than_neighbors as it was created by an inclusive scan
read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[threadIdx.x];
}
// if there are more loop steps to follow write the output array index of last minimizer in this loop step
if (first_element_in_step + windows_per_loop_step <= window_size - 1 && threadIdx.x == windows_per_loop_step - 1)
{
*local_index_to_write_next_minimizer_to = different_minimizer_than_neighbors[threadIdx.x];
}
}
}
/// \brief finds central minimizers
///
/// Finds the minimizers of windows of size window_size starting at position 0 and moving by one basepair at a time
///
/// \param minimizer_size kmer length
/// \param window_size number of kmers in one window, kmers being shifted by one basepair each
/// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on
/// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read
/// \param window_minimizers_representation output array of representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_windows_section index of first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially number of front end minimizers)
__global__ void find_central_minimizers(const std::uint64_t minimizer_size,
const std::uint64_t window_size,
const std::uint32_t basepairs_per_thread,
const char* const basepairs,
const ArrayBlock* const read_id_to_basepairs_section,
representation_t* const window_minimizers_representation,
char* const window_minimizers_direction,
position_in_read_t* const window_minimizers_position_in_read,
const ArrayBlock* const read_id_to_windows_section,
std::uint32_t* const read_id_to_minimizers_written,
const bool hash_representations)
{
// See find_front_end_minimizers for more details about the algorithm
const std::uint64_t index_of_first_element_to_process_global = read_id_to_basepairs_section[blockIdx.x].first_element_;
// Index of the element to which the first central minimizer of this read should be written. Index refers to the positions withing the whole array dedicated to all reads
const std::uint64_t output_index_to_write_the_first_minimizer_global = read_id_to_windows_section[blockIdx.x].first_element_ + read_id_to_minimizers_written[blockIdx.x];
const std::uint32_t basepairs_in_read = read_id_to_basepairs_section[blockIdx.x].block_size_;
const std::uint32_t kmers_in_read = basepairs_in_read - (minimizer_size - 1);
const std::uint32_t windows_in_read = kmers_in_read - (window_size - 1);
const std::uint16_t basepairs_per_loop_step = blockDim.x * basepairs_per_thread;
const std::uint16_t kmers_per_loop_step = basepairs_per_loop_step - (minimizer_size - 1);
const std::uint16_t windows_per_loop_step = kmers_per_loop_step - (window_size - 1);
// Dynamically allocating shared memory and assigning parts of it to different pointers
// Everything is 8-byte aligned
extern __shared__ std::uint64_t sm[];
// TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
// TODO: use sizeof to get the number of bytes
std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_per_loop_step elements
shared_memory_64_bit_elements_already_taken += (basepairs_per_loop_step + 7) / 8;
char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_per_loop_step elements
shared_memory_64_bit_elements_already_taken += (basepairs_per_loop_step + 7) / 8;
representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_per_loop_step elements
shared_memory_64_bit_elements_already_taken += kmers_per_loop_step;
char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements; 0 - forward, 1 - reverse
shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 7) / 8;
position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements
shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 1) / 2;
position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_per_loop_step elements; 0 - same, 1 - different
shared_memory_64_bit_elements_already_taken += (windows_per_loop_step + 1) / 2;
position_in_read_t* minimizer_position_in_read_of_largest_window_from_previous_step = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
position_in_read_t* local_index_to_write_next_minimizer_to = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // 1 element
shared_memory_64_bit_elements_already_taken += (1 + 1) / 2;
// TODO: Move to constant memory
char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements
if (0 == threadIdx.x)
{
forward_to_reverse_complement[0b000] = 0b0000;
forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
forward_to_reverse_complement[0b010] = 0b0000;
forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
forward_to_reverse_complement[0b101] = 0b0000;
forward_to_reverse_complement[0b110] = 0b0000;
forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
}
__syncthreads();
// if there are front minimizers take them into account
if (0 != read_id_to_minimizers_written[blockIdx.x])
{
*minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizers_position_in_read[output_index_to_write_the_first_minimizer_global - 1];
*local_index_to_write_next_minimizer_to = read_id_to_minimizers_written[blockIdx.x];
}
else
{
*minimizer_position_in_read_of_largest_window_from_previous_step = 0; // N/A
*local_index_to_write_next_minimizer_to = 0;
}
for (std::uint32_t first_element_in_step = 0; first_element_in_step < windows_in_read; first_element_in_step += windows_per_loop_step)
{
// load basepairs into shared memory and calculate the lexical ordering hash
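        // note: 0b11 & (bp >> 2 ^ bp >> 1) maps the ASCII codes A/a -> 0b00, C/c -> 0b01, G/g -> 0b10, T/t -> 0b11,
        // i.e. the 2-bit codes follow the lexical order of the bases; forward_to_reverse_complement maps the low
        // 3 bits of a base's ASCII code to a small value whose hash under the same formula is the complement's code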
for (std::uint32_t basepair_index = threadIdx.x; basepair_index < basepairs_per_loop_step && first_element_in_step + basepair_index < basepairs_in_read; basepair_index += blockDim.x)
{
const char bp = basepairs[index_of_first_element_to_process_global + first_element_in_step + basepair_index];
forward_basepair_hashes[basepair_index] = 0b11 & (bp >> 2 ^ bp >> 1);
reverse_basepair_hashes[basepair_index] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
}
__syncthreads();
// calculate kmer minimizers
for (std::uint32_t kmer_index = threadIdx.x; kmer_index < kmers_per_loop_step && first_element_in_step + kmer_index < kmers_in_read; kmer_index += blockDim.x)
{
representation_t forward_representation = 0;
representation_t reverse_representation = 0;
// TODO: It's not necessary to fully build both representations in order to determine which one is smaller. In most cases there is going to be a difference already at the first element
for (std::uint16_t i = 0; i < minimizer_size; ++i)
{
forward_representation |= forward_basepair_hashes[kmer_index + i] << 2 * (minimizer_size - i - 1);
reverse_representation |= reverse_basepair_hashes[kmer_index + i] << 2 * i;
}
if (hash_representations)
{
forward_representation = wang_hash64(forward_representation);
reverse_representation = wang_hash64(reverse_representation);
}
if (forward_representation <= reverse_representation)
{
minimizers_representation[kmer_index] = forward_representation;
minimizers_direction[kmer_index] = 0;
}
else
{
minimizers_representation[kmer_index] = reverse_representation;
minimizers_direction[kmer_index] = 1;
}
}
__syncthreads();
position_in_read_t window_minimizer_position_in_read = 0;
// find window minimizer
for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
{
            // assume that the first kmer in the window is the window minimizer
representation_t window_minimizer_representation = minimizers_representation[window_index];
window_minimizer_position_in_read = first_element_in_step + window_index;
            // now check the minimizers of all other kmers in the window
for (std::uint16_t i = 1; i < window_size; ++i)
{
if (minimizers_representation[window_index + i] <= window_minimizer_representation)
{
window_minimizer_representation = minimizers_representation[window_index + i];
window_minimizer_position_in_read = first_element_in_step + window_index + i;
}
}
minimizers_position_in_read[window_index] = window_minimizer_position_in_read;
}
__syncthreads();
        // check if the window to the left has the same minimizer
for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
{
            // if this is the first window in the read and there were no front end minimizers then this is the first occurrence of this minimizer
if (0 == first_element_in_step + window_index && 0 == read_id_to_minimizers_written[blockIdx.x])
{
different_minimizer_than_neighbors[0] = 1;
}
else
{
representation_t neighbors_minimizers_position_in_read = 0;
// find left neighbor's window minimizer's position in read
if (0 == window_index)
{
neighbors_minimizers_position_in_read = *minimizer_position_in_read_of_largest_window_from_previous_step;
}
else
{
// TODO: consider using warp shuffle instead of shared memory
neighbors_minimizers_position_in_read = minimizers_position_in_read[window_index - 1];
}
// check if it's the same minimizer
if (neighbors_minimizers_position_in_read == minimizers_position_in_read[window_index])
{
different_minimizer_than_neighbors[window_index] = 0;
}
else
{
different_minimizer_than_neighbors[window_index] = 1;
}
}
}
__syncthreads();
        // if there are more loop steps to follow write the position of the minimizer of the last window
// "windows_per_loop_step % blockDim.x - 1" determines the thread which processes the last window
if (first_element_in_step + windows_per_loop_step < windows_in_read && threadIdx.x == windows_per_loop_step % blockDim.x - 1)
{
*minimizer_position_in_read_of_largest_window_from_previous_step = window_minimizer_position_in_read;
}
// perform inclusive scan
        // different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
// TODO: implement it using warp shuffle or use CUB
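        // e.g. with flags {1,0,0,1,1} and *local_index_to_write_next_minimizer_to = 5 the scan yields {6,6,6,7,8},
        // so the three flagged windows write their minimizers to offsets 5, 6 and 7 of this read's output section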
if (0 == threadIdx.x)
{
std::uint16_t i = 0;
different_minimizer_than_neighbors[i] += *local_index_to_write_next_minimizer_to;
for (i = 1; i < windows_per_loop_step && first_element_in_step + i < windows_in_read; ++i)
{
different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
}
}
__syncthreads();
// now save minimizers to output array
for (std::uint32_t window_index = threadIdx.x; window_index < windows_per_loop_step && first_element_in_step + window_index < windows_in_read; window_index += blockDim.x)
{
            // for window_index == 0 *local_index_to_write_next_minimizer_to holds the write index carried over from the previous loop step (or its initial value set before entering the loop when first_element_in_step == 0)
const std::uint32_t neighbors_write_index = 0 == window_index ? *local_index_to_write_next_minimizer_to : different_minimizer_than_neighbors[window_index - 1];
if (neighbors_write_index < different_minimizer_than_neighbors[window_index])
{
// output array offset added in inclusive sum
const auto output_index = read_id_to_windows_section[blockIdx.x].first_element_ + different_minimizer_than_neighbors[window_index] - 1;
window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[window_index] - first_element_in_step];
window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[window_index] - first_element_in_step];
window_minimizers_position_in_read[output_index] = minimizers_position_in_read[window_index];
}
}
__syncthreads();
        // update the number of written minimizers to cover the central minimizers as well
        // the inclusive scan value of the last window in the read equals the total number of minimizers written so far
if (first_element_in_step + windows_per_loop_step >= windows_in_read && 0 == threadIdx.x)
{ // only do it when there is not going to be new loop step
read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[windows_in_read - first_element_in_step - 1]; // write the index of the last window
}
// if there are more loop steps to follow write the output array index of the last minimizer in this loop step
if (first_element_in_step + windows_per_loop_step < windows_in_read && 0 == threadIdx.x)
{
*local_index_to_write_next_minimizer_to = different_minimizer_than_neighbors[windows_per_loop_step - 1]; // index of last written minimizer + 1
}
}
}
/// \brief finds back end minimizers
///
/// Finds the minimizers of windows ending at the last basepair, with window sizes ranging from 1 to window_size-1
///
/// \param minimizer_size kmer length
/// \param window_size number of kmers in one central minimizer window, kmers being shifted by one basepair each (for back end minimizers window size actually varies from 1 to window_size-1)
/// \param basepairs array of basepairs, first come basepairs for read 0, then read 1 and so on
/// \param read_id_to_basepairs_section index of the first basepair of every read (in basepairs array) and the number of basepairs in that read
/// \param window_minimizers_representation output array of representations of minimizers, grouped by reads
/// \param window_minimizers_direction output array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param window_minimizers_position_in_read output array of positions in read of minimizers, grouped by reads
/// \param read_id_to_windows_section index of the first element dedicated to that read in output arrays and the number of dedicated elements (enough elements are allocated to each read to support cases where each window has a different minimizer, no need to check that condition)
/// \param read_id_to_minimizers_written how many minimizers have been written for this read already (initially number of front end and central minimizers)
__global__ void find_back_end_minimizers(const std::uint64_t minimizer_size,
const std::uint64_t window_size,
const char* const basepairs,
const ArrayBlock* const read_id_to_basepairs_section,
representation_t* const window_minimizers_representation,
char* const window_minimizers_direction,
position_in_read_t* const window_minimizers_position_in_read,
const ArrayBlock* const read_id_to_windows_section,
std::uint32_t* const read_id_to_minimizers_written,
const bool hash_representations)
{
// See find_front_end_minimizers for more details about the algorithm
if (1 == window_size)
{
        // if 1 == window_size there are no end minimizers
return;
}
    // Index of the first basepair which belongs to the largest back end minimizer window. Index of that basepair within the read
const auto index_of_first_element_to_process_local = read_id_to_basepairs_section[blockIdx.x].block_size_ - (window_size - 1 + minimizer_size - 1);
    // Index of the first basepair which belongs to the largest back end minimizer window. Index of that basepair within the whole array of basepairs for all reads
const auto index_of_first_element_to_process_global = read_id_to_basepairs_section[blockIdx.x].first_element_ + index_of_first_element_to_process_local;
    // Index of the element to which the first back end minimizer of this read should be written. Index refers to the position within the section dedicated to this read
const auto output_index_to_write_the_first_minimizer_local = read_id_to_minimizers_written[blockIdx.x];
    // Index of the element to which the first back end minimizer of this read should be written. Index refers to the position within the whole array dedicated to all reads
const auto output_index_to_write_the_first_minimizer_global = read_id_to_windows_section[blockIdx.x].first_element_ + output_index_to_write_the_first_minimizer_local;
// Dynamically allocating shared memory and assigning parts of it to different pointers
    // Everything is 8-byte aligned
extern __shared__ std::uint64_t sm[];
// TODO: not all arrays are needed at the same time -> reduce shared memory requirements by reusing parts of the memory
// TODO: use sizeof to get the number of bytes
std::uint32_t shared_memory_64_bit_elements_already_taken = 0;
char* forward_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_to_process elements
shared_memory_64_bit_elements_already_taken += (window_size - 1 + minimizer_size - 1 + 7) / 8;
char* reverse_basepair_hashes = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // basepairs_to_process elements
shared_memory_64_bit_elements_already_taken += (window_size - 1 + minimizer_size - 1 + 7) / 8;
representation_t* minimizers_representation = reinterpret_cast<representation_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_to_process elements
shared_memory_64_bit_elements_already_taken += window_size - 1;
char* minimizers_direction = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // kmers_to_process elements; 0 - forward, 1 - reverse
shared_memory_64_bit_elements_already_taken += (window_size - 1 + 7) / 8;
position_in_read_t* minimizers_position_in_read = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_to_process elements
shared_memory_64_bit_elements_already_taken += (window_size - 1 + 1) / 2;
position_in_read_t* different_minimizer_than_neighbors = reinterpret_cast<position_in_read_t*>(&sm[shared_memory_64_bit_elements_already_taken]); // windows_to_process elements; 0 - same, 1 - different
shared_memory_64_bit_elements_already_taken += (window_size - 1 + 1) / 2;
// TODO: Move to constant memory
char* forward_to_reverse_complement = reinterpret_cast<char*>(&sm[shared_memory_64_bit_elements_already_taken]); // 8 elements
if (0 == threadIdx.x)
{
forward_to_reverse_complement[0b000] = 0b0000;
forward_to_reverse_complement[0b001] = 0b0100; // A -> T (0b1 -> 0b10100)
forward_to_reverse_complement[0b010] = 0b0000;
forward_to_reverse_complement[0b011] = 0b0111; // C -> G (0b11 -> 0b111)
forward_to_reverse_complement[0b100] = 0b0001; // T -> A (0b10100 -> 0b1)
forward_to_reverse_complement[0b101] = 0b0000;
forward_to_reverse_complement[0b110] = 0b0000;
forward_to_reverse_complement[0b111] = 0b0011; // G -> C (0b111 -> 0b11)
}
__syncthreads();
    // There are only window_size-1 back end windows. window_size usually has a value of a few dozen
    // Having window_size so large that the data does not fit in shared memory is unlikely
// If that happens implement this method similarly to find_central_minimizers
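    // e.g. with window_size = 64 and minimizer_size = 15 the back end section spans 64-1 + 15-1 = 77 basepairs,
    // i.e. 63 kmers and 63 (progressively shorter) windows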
// load basepairs into shared memory and calculate the lexical ordering hash
for (std::uint16_t basepair_index = threadIdx.x; basepair_index < window_size - 1 + minimizer_size - 1; basepair_index += blockDim.x)
{
const char bp = basepairs[index_of_first_element_to_process_global + basepair_index];
forward_basepair_hashes[basepair_index] = 0b11 & (bp >> 2 ^ bp >> 1);
reverse_basepair_hashes[basepair_index] = 0b11 & (forward_to_reverse_complement[0b111 & bp] >> 2 ^ forward_to_reverse_complement[0b111 & bp] >> 1);
}
__syncthreads();
// calculate kmer minimizers
// For back end minimizers the number of kmers is the same as the number of windows
for (std::uint16_t kmer_index = threadIdx.x; kmer_index < window_size - 1; kmer_index += blockDim.x)
{
representation_t forward_representation = 0;
representation_t reverse_representation = 0;
// TODO: It's not necessary to fully build both representations in order to determine which one is smaller. In most cases there is going to be a difference already at the first element
for (std::uint16_t i = 0; i < minimizer_size; ++i)
{
forward_representation |= forward_basepair_hashes[kmer_index + i] << 2 * (minimizer_size - i - 1);
reverse_representation |= reverse_basepair_hashes[kmer_index + i] << 2 * i;
}
if (hash_representations)
{
forward_representation = wang_hash64(forward_representation);
reverse_representation = wang_hash64(reverse_representation);
}
if (forward_representation <= reverse_representation)
{
minimizers_representation[kmer_index] = forward_representation;
minimizers_direction[kmer_index] = 0;
}
else
{
minimizers_representation[kmer_index] = reverse_representation;
minimizers_direction[kmer_index] = 1;
}
}
__syncthreads();
// find window minimizer
for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
{
// assume that the first kmer in the window is the minimizer
representation_t window_minimizer_representation = minimizers_representation[window_index];
position_in_read_t window_minimizer_position_in_read = index_of_first_element_to_process_local + window_index;
        // now check the other kmers in the window (note that these are the back end minimizers, so not all windows have the same length)
for (std::uint16_t i = 1; window_index + i < window_size - 1; ++i)
{
if (minimizers_representation[window_index + i] <= window_minimizer_representation)
{
window_minimizer_representation = minimizers_representation[window_index + i];
window_minimizer_position_in_read = index_of_first_element_to_process_local + window_index + i;
}
}
minimizers_position_in_read[window_index] = window_minimizer_position_in_read;
}
__syncthreads();
    // check if the window to the left has the same minimizer
for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
{
representation_t neighbors_minimizers_position_in_read = 0;
// find left neighbor's window minimizer's position in read
if (0 == window_index)
{
            // if this is the first window take the position of the minimizer of the last central window
neighbors_minimizers_position_in_read = window_minimizers_position_in_read[output_index_to_write_the_first_minimizer_global - 1];
}
else
{
// TODO: consider using warp shuffle instead of shared memory
neighbors_minimizers_position_in_read = minimizers_position_in_read[window_index - 1];
}
// check if it's the same minimizer
if (neighbors_minimizers_position_in_read == minimizers_position_in_read[window_index])
{
different_minimizer_than_neighbors[window_index] = 0;
}
else
{
different_minimizer_than_neighbors[window_index] = 1;
}
}
__syncthreads();
// perform inclusive scan
    // different_minimizer_than_neighbors changes meaning and becomes more like "output_array_index_to_write_the_value_plus_one"
// TODO: implement it using warp shuffle or use CUB
if (0 == threadIdx.x)
{
        // read_id_to_minimizers_written[blockIdx.x] is the index of the last written minimizer plus one
different_minimizer_than_neighbors[0] += output_index_to_write_the_first_minimizer_local;
for (std::uint16_t i = 1; i < window_size - 1; ++i)
{
different_minimizer_than_neighbors[i] += different_minimizer_than_neighbors[i - 1];
}
}
__syncthreads();
// now save minimizers to output array
for (std::uint16_t window_index = threadIdx.x; window_index < window_size - 1; window_index += blockDim.x)
{
        // different_minimizer_than_neighbors contains an inclusive scan, i.e. it's index_to_write_to + 1
const std::uint32_t neighbors_write_index = 0 == window_index ? output_index_to_write_the_first_minimizer_local : different_minimizer_than_neighbors[window_index - 1];
if (neighbors_write_index < different_minimizer_than_neighbors[window_index])
{
            // subtract 1 from different_minimizer_than_neighbors to get the actual index to write to
const auto output_index = read_id_to_windows_section[blockIdx.x].first_element_ + different_minimizer_than_neighbors[window_index] - 1;
            // subtract index_of_first_element_to_process_local to get the index in shared memory
window_minimizers_representation[output_index] = minimizers_representation[minimizers_position_in_read[window_index] - index_of_first_element_to_process_local];
window_minimizers_direction[output_index] = minimizers_direction[minimizers_position_in_read[window_index] - index_of_first_element_to_process_local];
window_minimizers_position_in_read[output_index] = minimizers_position_in_read[window_index];
}
}
__syncthreads();
    // save the total number of minimizers written for this read (write index of the last written minimizer + 1)
if (0 == threadIdx.x)
{
read_id_to_minimizers_written[blockIdx.x] = different_minimizer_than_neighbors[window_size - 1 - 1];
}
}
/// \brief packs minimizers of different reads together
///
/// window_minimizers_representation, window_minimizers_position_in_read and window_minimizers_direction all allocate one element for each window in the read.
/// Many windows share the same minimizer and each minimizer is written only once, meaning many elements do not contain minimizers.
/// This function creates new arrays where such elements do not exist.
/// Note that in the input arrays all minimizers of one read are written consecutively, i.e. [read 0 minimizers], [read 0 junk], [read 1 minimizers], [read 1 junk], [read 2 minimizers]...
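/// e.g. if reads 0, 1 and 2 have 3, 2 and 4 minimizers respectively, the compressed arrays contain
/// [r0m0 r0m1 r0m2 r1m0 r1m1 r2m0 r2m1 r2m2 r2m3] and read_id_to_compressed_minimizers = {(0,3),(3,2),(5,4)}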
///
/// \param window_minimizers_representation array of representations of minimizers, grouped by reads
/// \param window_minimizers_position_in_read array of positions in read of minimizers, grouped by reads
/// \param window_minimizers_direction array of directions of minimizers, grouped by reads (0 - forward, 1 - reverse)
/// \param read_id_to_windows_section index of first element dedicated to that read in input arrays and the number of dedicated elements
/// \param representations_compressed array of representations of minimizers, grouped by reads, without invalid elements between the reads
/// \param rest_compressed array of read_ids, positions_in_read and directions of minimizers, grouped by reads, without invalid elements between the reads
/// \param read_id_to_compressed_minimizers index of the first element dedicated to that read in the output (compressed) arrays and the number of dedicated elements
__global__ void compress_minimizers(const representation_t* const window_minimizers_representation,
const position_in_read_t* const window_minimizers_position_in_read,
const char* const window_minimizers_direction,
const ArrayBlock* const read_id_to_windows_section,
representation_t* const representations_compressed,
Minimizer::ReadidPositionDirection* const rest_compressed,
const ArrayBlock* const read_id_to_compressed_minimizers,
std::uint32_t offset)
{
const auto& first_input_minimizer = read_id_to_windows_section[blockIdx.x].first_element_;
const auto& first_output_minimizer = read_id_to_compressed_minimizers[blockIdx.x].first_element_;
const auto& number_of_minimizers = read_id_to_compressed_minimizers[blockIdx.x].block_size_;
for (std::uint32_t i = threadIdx.x; i < number_of_minimizers; i += blockDim.x)
{
representations_compressed[first_output_minimizer + i] = window_minimizers_representation[first_input_minimizer + i];
rest_compressed[first_output_minimizer + i].read_id_ = blockIdx.x + offset;
rest_compressed[first_output_minimizer + i].position_in_read_ = window_minimizers_position_in_read[first_input_minimizer + i];
rest_compressed[first_output_minimizer + i].direction_ = window_minimizers_direction[first_input_minimizer + i];
}
}
Minimizer::GeneratedSketchElements Minimizer::generate_sketch_elements(std::shared_ptr<DeviceAllocator> allocator, const std::uint64_t number_of_reads_to_add,
const std::uint64_t minimizer_size,
const std::uint64_t window_size,
const std::uint64_t read_id_of_first_read,
const device_buffer<char>& merged_basepairs_d,
const std::vector<ArrayBlock>& read_id_to_basepairs_section_h,
const device_buffer<ArrayBlock>& read_id_to_basepairs_section_d,
const bool hash_representations)
{
// for each read find the maximum number of minimizers (one per window), determine their section in the minimizer arrays and allocate the arrays
std::uint64_t total_windows = 0;
std::vector<ArrayBlock> read_id_to_windows_section_h(number_of_reads_to_add, {0, 0});
for (read_id_t read_id = 0; read_id < number_of_reads_to_add; ++read_id)
{
read_id_to_windows_section_h[read_id].first_element_ = total_windows;
std::uint32_t windows = window_size - 1; // front end minimizers
windows += read_id_to_basepairs_section_h[read_id].block_size_ - (minimizer_size + window_size - 1) + 1; // central minimizers
        windows += window_size - 1;                                                                               // back end minimizers
read_id_to_windows_section_h[read_id].block_size_ = windows;
total_windows += windows;
}
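    // e.g. for a read of 1000 basepairs with minimizer_size = 15 and window_size = 10 this reserves
    // 9 (front end) + 1000 - 24 + 1 = 977 (central) + 9 (back end) = 995 windows for that read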
CGA_LOG_INFO("Allocating {} bytes for read_id_to_windows_section_d", read_id_to_windows_section_h.size() * sizeof(decltype(read_id_to_windows_section_h)::value_type));
device_buffer<decltype(read_id_to_windows_section_h)::value_type> read_id_to_windows_section_d(read_id_to_windows_section_h.size(), allocator);
CGA_CU_CHECK_ERR(cudaMemcpy(read_id_to_windows_section_d.data(),
read_id_to_windows_section_h.data(),
read_id_to_windows_section_h.size() * sizeof(decltype(read_id_to_windows_section_h)::value_type),
cudaMemcpyHostToDevice));
CGA_LOG_INFO("Allocating {} bytes for window_minimizers_representation_d", total_windows * sizeof(representation_t));
device_buffer<representation_t> window_minimizers_representation_d(total_windows, allocator);
CGA_LOG_INFO("Allocating {} bytes for window_minimizers_direction_d", total_windows * sizeof(char));
device_buffer<char> window_minimizers_direction_d(total_windows, allocator);
CGA_LOG_INFO("Allocating {} bytes for window_minimizers_position_in_read_d", total_windows * sizeof(position_in_read_t));
device_buffer<position_in_read_t> window_minimizers_position_in_read_d(total_windows, allocator);
CGA_LOG_INFO("Allocating {} bytes for read_id_to_minimizers_written_d", number_of_reads_to_add * sizeof(std::uint32_t));
device_buffer<std::uint32_t> read_id_to_minimizers_written_d(number_of_reads_to_add, allocator);
// initially there are no minimizers written to the output arrays
CGA_CU_CHECK_ERR(cudaMemset(read_id_to_minimizers_written_d.data(), 0, number_of_reads_to_add * sizeof(std::uint32_t)));
// *** front end minimizers ***
std::uint32_t num_of_basepairs_for_front_minimizers = (window_size - 1) + minimizer_size - 1;
std::uint32_t num_of_threads = std::min(num_of_basepairs_for_front_minimizers, 64u);
// largest window in end minimizers has the size of window_size-1, meaning it covers window_size-1 + minimizer_size - 1 basepairs
const std::uint32_t basepairs_for_end_minimizers = (window_size - 1 + minimizer_size - 1);
    const std::uint32_t kmers_for_end_minimizers = window_size - 1; // for end minimizers the number of kmers is the same as the number of windows because the last window has only one kmer
const std::uint32_t windows_for_end_minimizers = window_size - 1;
    // determine the total amount of shared memory needed (see kernel for clarification)
    // shared memory is aligned to 8 bytes, so for 1-byte variables (x+7)/8 8-byte values are allocated (for 10 1-byte elements (10+7)/8=17/8=2 8-byte elements are allocated, instead of 10/8=1 which would be too few)
    // the final number of allocated 8-byte values is multiplied by 8 at the end in order to get the number of bytes needed
std::uint32_t shared_memory_for_kernel = 0;
shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // forward basepairs (char)
shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // reverse basepairs (char)
shared_memory_for_kernel += (kmers_for_end_minimizers); // representations of minimizers (representation_t)
shared_memory_for_kernel += (windows_for_end_minimizers + 7) / 8; // directions of representations of minimizers (char)
shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // position_in_read of minimizers (position_in_read_t)
shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // does the window have a different minimizer than its left neighbor (position_in_read_t)
shared_memory_for_kernel += 1; // representation from previous step
shared_memory_for_kernel += (1 + 1) / 2; // position from previous step (char)
shared_memory_for_kernel += (1 + 1) / 2; // inclusive sum from previous step (position_in_read_t)
shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char)
    shared_memory_for_kernel *= 8; // so far this was a count of 8-byte values, multiply by 8 to get the number of bytes
CGA_LOG_INFO("Launching find_front_end_minimizers with {} bytes of shared memory", shared_memory_for_kernel);
find_front_end_minimizers<<<number_of_reads_to_add, num_of_threads, shared_memory_for_kernel>>>(minimizer_size,
window_size,
merged_basepairs_d.data(),
read_id_to_basepairs_section_d.data(),
window_minimizers_representation_d.data(),
window_minimizers_direction_d.data(),
window_minimizers_position_in_read_d.data(),
read_id_to_windows_section_d.data(),
read_id_to_minimizers_written_d.data(),
hash_representations);
CGA_CU_CHECK_ERR(cudaStreamSynchronize(0));
// *** central minimizers ***
    const std::uint32_t basepairs_per_thread = 8; // arbitrary, tradeoff between the number of thread blocks that can be scheduled simultaneously and the number of basepairs which have to be loaded multiple times because only basepairs_per_thread*num_of_threads-(window_size_ + minimizer_size_ - 1) + 1 can be processed at once, i.e. window_size+minimizer_size-2 basepairs have to be loaded again
num_of_threads = 64; // arbitrary
const std::uint32_t basepairs_in_loop_step = num_of_threads * basepairs_per_thread;
const std::uint32_t minimizers_in_loop_step = basepairs_in_loop_step - minimizer_size + 1;
const std::uint32_t windows_in_loop_step = minimizers_in_loop_step - window_size + 1;
shared_memory_for_kernel = 0;
shared_memory_for_kernel += (basepairs_in_loop_step + 7) / 8; // forward basepairs (char)
shared_memory_for_kernel += (basepairs_in_loop_step + 7) / 8; // reverse basepairs (char)
shared_memory_for_kernel += minimizers_in_loop_step; // representations of minimizers (representation_t)
shared_memory_for_kernel += (windows_in_loop_step + 7) / 8; // directions of representations of minimizers (char)
shared_memory_for_kernel += (windows_in_loop_step + 1) / 2; // position_in_read of minimizers (position_in_read_t)
shared_memory_for_kernel += (windows_in_loop_step + 1) / 2; // does the window have a different minimizer than its left neighbor
shared_memory_for_kernel += (1 + 1) / 2; // position from previous step (char)
shared_memory_for_kernel += (1 + 1) / 2; // inclusive sum from previous step (position_in_read_t)
shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char)
    shared_memory_for_kernel *= 8; // so far this was a count of 8-byte values, multiply by 8 to get the number of bytes
CGA_LOG_INFO("Launching find_central_minimizers with {} bytes of shared memory", shared_memory_for_kernel);
find_central_minimizers<<<number_of_reads_to_add, num_of_threads, shared_memory_for_kernel>>>(minimizer_size,
window_size,
basepairs_per_thread,
merged_basepairs_d.data(),
read_id_to_basepairs_section_d.data(),
window_minimizers_representation_d.data(),
window_minimizers_direction_d.data(),
window_minimizers_position_in_read_d.data(),
read_id_to_windows_section_d.data(),
read_id_to_minimizers_written_d.data(),
hash_representations);
CGA_CU_CHECK_ERR(cudaStreamSynchronize(0));
// *** back end minimizers ***
num_of_threads = 64;
    // largest window should fit in shared memory
shared_memory_for_kernel = 0;
shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // forward basepairs (char)
shared_memory_for_kernel += (basepairs_for_end_minimizers + 7) / 8; // reverse basepairs (char)
shared_memory_for_kernel += kmers_for_end_minimizers; // representations of minimizers (representation_t)
shared_memory_for_kernel += (kmers_for_end_minimizers + 7) / 8; // directions of representations of minimizers (char)
shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // position_in_read of minimizers (position_in_read_t)
shared_memory_for_kernel += (windows_for_end_minimizers + 1) / 2; // does the window have a different minimizer than its left neighbor
shared_memory_for_kernel += 8 / 8; // forward -> reverse complement conversion (char)
    shared_memory_for_kernel *= 8; // so far this was a count of 8-byte values, multiply by 8 to get the number of bytes
CGA_LOG_INFO("Launching find_back_end_minimizers with {} bytes of shared memory", shared_memory_for_kernel);
find_back_end_minimizers<<<number_of_reads_to_add, num_of_threads, shared_memory_for_kernel>>>(minimizer_size,
window_size,
merged_basepairs_d.data(),
read_id_to_basepairs_section_d.data(),
window_minimizers_representation_d.data(),
window_minimizers_direction_d.data(),
window_minimizers_position_in_read_d.data(),
read_id_to_windows_section_d.data(),
read_id_to_minimizers_written_d.data(),
hash_representations);
CGA_CU_CHECK_ERR(cudaStreamSynchronize(0));
std::vector<std::uint32_t> read_id_to_minimizers_written_h(number_of_reads_to_add);
CGA_CU_CHECK_ERR(cudaMemcpy(read_id_to_minimizers_written_h.data(),
read_id_to_minimizers_written_d.data(),
read_id_to_minimizers_written_h.size() * sizeof(decltype(read_id_to_minimizers_written_h)::value_type),
cudaMemcpyDeviceToHost));
CGA_LOG_INFO("Deallocating {} bytes from read_id_to_minimizers_written_d", read_id_to_minimizers_written_d.size() * sizeof(decltype(read_id_to_minimizers_written_d)::value_type));
read_id_to_minimizers_written_d.free();
    // *** remove unused elements from the window minimizers arrays ***
    // In window_minimizers_representation_d and other arrays enough space was allocated to support cases where each window has a different minimizer. In reality many neighboring windows share the same minimizer
    // As a result there are areas of meaningless data between minimizers belonging to different reads (space_allocated_for_all_possible_minimizers_of_a_read - space_needed_for_the_actual_minimizers)
    // At this point all minimizers are put together (compressed) so that the last minimizer of one read is next to the first minimizer of another read
// Data is organized in two arrays in order to support usage of thrust::stable_sort_by_key. One contains representations (key) and the other the rest (values)
std::vector<ArrayBlock> read_id_to_compressed_minimizers_h(number_of_reads_to_add, {0, 0});
std::uint64_t total_minimizers = 0;
for (std::size_t read_id = 0; read_id < read_id_to_minimizers_written_h.size(); ++read_id)
{
read_id_to_compressed_minimizers_h[read_id].first_element_ = total_minimizers;
read_id_to_compressed_minimizers_h[read_id].block_size_ = read_id_to_minimizers_written_h[read_id];
total_minimizers += read_id_to_minimizers_written_h[read_id];
}
CGA_LOG_INFO("Allocating {} bytes for read_id_to_compressed_minimizers_d", read_id_to_compressed_minimizers_h.size() * sizeof(decltype(read_id_to_compressed_minimizers_h)::value_type));
device_buffer<decltype(read_id_to_compressed_minimizers_h)::value_type> read_id_to_compressed_minimizers_d(read_id_to_compressed_minimizers_h.size(), allocator);
CGA_CU_CHECK_ERR(cudaMemcpy(read_id_to_compressed_minimizers_d.data(),
read_id_to_compressed_minimizers_h.data(),
read_id_to_compressed_minimizers_h.size() * sizeof(decltype(read_id_to_compressed_minimizers_h)::value_type),
cudaMemcpyHostToDevice));
CGA_LOG_INFO("Allocating {} bytes for representations_compressed_d", total_minimizers * sizeof(representation_t));
device_buffer<representation_t> representations_compressed_d(total_minimizers, allocator);
// rest = position_in_read, direction and read_id
CGA_LOG_INFO("Allocating {} bytes for rest_compressed_d", total_minimizers * sizeof(ReadidPositionDirection));
device_buffer<ReadidPositionDirection> rest_compressed_d(total_minimizers, allocator);
CGA_LOG_INFO("Launching compress_minimizers with {} bytes of shared memory", 0);
compress_minimizers<<<number_of_reads_to_add, 128>>>(window_minimizers_representation_d.data(),
window_minimizers_position_in_read_d.data(),
window_minimizers_direction_d.data(),
read_id_to_windows_section_d.data(),
representations_compressed_d.data(),
rest_compressed_d.data(),
read_id_to_compressed_minimizers_d.data(),
read_id_of_first_read);
CGA_CU_CHECK_ERR(cudaStreamSynchronize(0));
// free these arrays as they are not needed anymore
CGA_LOG_INFO("Deallocating {} bytes from window_minimizers_representation_d", window_minimizers_representation_d.size() * sizeof(decltype(window_minimizers_representation_d)::value_type));
window_minimizers_representation_d.free();
CGA_LOG_INFO("Deallocating {} bytes from window_minimizers_direction_d", window_minimizers_direction_d.size() * sizeof(decltype(window_minimizers_direction_d)::value_type));
window_minimizers_direction_d.free();
CGA_LOG_INFO("Deallocating {} bytes from window_minimizers_position_in_read_d", window_minimizers_position_in_read_d.size() * sizeof(decltype(window_minimizers_position_in_read_d)::value_type));
window_minimizers_position_in_read_d.free();
CGA_LOG_INFO("Deallocating {} bytes from read_id_to_compressed_minimizers_d", read_id_to_compressed_minimizers_d.size() * sizeof(decltype(read_id_to_compressed_minimizers_d)::value_type));
read_id_to_compressed_minimizers_d.free();
CGA_LOG_INFO("Deallocating {} bytes from read_id_to_windows_section_d", read_id_to_windows_section_d.size() * sizeof(decltype(read_id_to_windows_section_d)::value_type));
read_id_to_windows_section_d.free();
return {std::move(representations_compressed_d),
std::move(rest_compressed_d)};
}
} // namespace cudamapper
} // namespace claragenomics
|
4b8938a737c9c60971b07e8ebca56e2fd20e0671.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transform_image.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
const float *raw_input = NULL;
hipMalloc(&raw_input, XSIZE*YSIZE);
const int width = 1;
const int channels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
transform_image), dim3(gridBlock),dim3(threadBlock), 0, 0, input,raw_input,width,channels);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
transform_image), dim3(gridBlock),dim3(threadBlock), 0, 0, input,raw_input,width,channels);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
transform_image), dim3(gridBlock),dim3(threadBlock), 0, 0, input,raw_input,width,channels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4b8938a737c9c60971b07e8ebca56e2fd20e0671.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transform_image.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
const float *raw_input = NULL;
cudaMalloc(&raw_input, XSIZE*YSIZE);
const int width = 1;
const int channels = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
transform_image<<<gridBlock,threadBlock>>>(input,raw_input,width,channels);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transform_image<<<gridBlock,threadBlock>>>(input,raw_input,width,channels);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transform_image<<<gridBlock,threadBlock>>>(input,raw_input,width,channels);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
59e6584fd8669170f0693755a80f3861520d6c98.hip | // !!! This is a file automatically generated by hipify!!!
//
// auto-generated by op2.py
//
//global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ double smoothing_coefficient_cuda;
__constant__ double ff_variable_cuda[5];
__constant__ double ff_flux_contribution_momentum_x_cuda[3];
__constant__ double ff_flux_contribution_momentum_y_cuda[3];
__constant__ double ff_flux_contribution_momentum_z_cuda[3];
__constant__ double ff_flux_contribution_density_energy_cuda[3];
__constant__ int mesh_name_cuda;
//header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!OP_hybrid_gpu) return;
if (!strcmp(name,"smoothing_coefficient")) {
cutilSafeCall(hipMemcpyToSymbol(smoothing_coefficient_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_variable")) {
cutilSafeCall(hipMemcpyToSymbol(ff_variable_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_flux_contribution_momentum_x")) {
cutilSafeCall(hipMemcpyToSymbol(ff_flux_contribution_momentum_x_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_flux_contribution_momentum_y")) {
cutilSafeCall(hipMemcpyToSymbol(ff_flux_contribution_momentum_y_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_flux_contribution_momentum_z")) {
cutilSafeCall(hipMemcpyToSymbol(ff_flux_contribution_momentum_z_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_flux_contribution_density_energy")) {
cutilSafeCall(hipMemcpyToSymbol(ff_flux_contribution_density_energy_cuda, dat, dim*size));
}
else
if (!strcmp(name,"mesh_name")) {
cutilSafeCall(hipMemcpyToSymbol(mesh_name_cuda, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
#ifdef PAPI
#include <papi.h>
#endif
void op_par_loop_compute_flux_edge_kernel_instrumented(
char const *name, op_set set,
op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4
#ifdef VERIFY_OP2_TIMING
, double* compute_time_ptr, double* sync_time_ptr
#endif
, long* iter_counts_ptr
#ifdef PAPI
, long_long* restrict event_counts, int event_set, int num_events
#endif
);
//user kernel files
#include "initialize_variables_kernel_kernel.cu"
#include "zero_5d_array_kernel_kernel.cu"
#include "zero_1d_array_kernel_kernel.cu"
#include "calculate_cell_volumes_kernel.cu"
#include "dampen_ewt_kernel.cu"
#include "copy_double_kernel_kernel.cu"
#include "calculate_dt_kernel_kernel.cu"
#include "get_min_dt_kernel_kernel.cu"
#include "compute_step_factor_kernel_kernel.cu"
#include "compute_flux_edge_kernel_kernel.cu"
#include "compute_bnd_node_flux_kernel_kernel.cu"
#include "time_step_kernel_kernel.cu"
#include "indirect_rw_kernel_kernel.cu"
#include "residual_kernel_kernel.cu"
#include "calc_rms_kernel_kernel.cu"
#include "count_bad_vals_kernel.cu"
#include "up_pre_kernel_kernel.cu"
#include "up_kernel_kernel.cu"
#include "up_post_kernel_kernel.cu"
#include "down_v2_kernel_pre_kernel.cu"
#include "down_v2_kernel_kernel.cu"
#include "down_v2_kernel_post_kernel.cu"
#include "down_kernel_kernel.cu"
#include "identify_differences_kernel.cu"
#include "count_non_zeros_kernel.cu"
| 59e6584fd8669170f0693755a80f3861520d6c98.cu | //
// auto-generated by op2.py
//
//global constants
#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif
__constant__ double smoothing_coefficient_cuda;
__constant__ double ff_variable_cuda[5];
__constant__ double ff_flux_contribution_momentum_x_cuda[3];
__constant__ double ff_flux_contribution_momentum_y_cuda[3];
__constant__ double ff_flux_contribution_momentum_z_cuda[3];
__constant__ double ff_flux_contribution_density_energy_cuda[3];
__constant__ int mesh_name_cuda;
//header
#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"
void op_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!OP_hybrid_gpu) return;
if (!strcmp(name,"smoothing_coefficient")) {
cutilSafeCall(cudaMemcpyToSymbol(smoothing_coefficient_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_variable")) {
cutilSafeCall(cudaMemcpyToSymbol(ff_variable_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_flux_contribution_momentum_x")) {
cutilSafeCall(cudaMemcpyToSymbol(ff_flux_contribution_momentum_x_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_flux_contribution_momentum_y")) {
cutilSafeCall(cudaMemcpyToSymbol(ff_flux_contribution_momentum_y_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_flux_contribution_momentum_z")) {
cutilSafeCall(cudaMemcpyToSymbol(ff_flux_contribution_momentum_z_cuda, dat, dim*size));
}
else
if (!strcmp(name,"ff_flux_contribution_density_energy")) {
cutilSafeCall(cudaMemcpyToSymbol(ff_flux_contribution_density_energy_cuda, dat, dim*size));
}
else
if (!strcmp(name,"mesh_name")) {
cutilSafeCall(cudaMemcpyToSymbol(mesh_name_cuda, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
#ifdef PAPI
#include <papi.h>
#endif
void op_par_loop_compute_flux_edge_kernel_instrumented(
char const *name, op_set set,
op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4
#ifdef VERIFY_OP2_TIMING
, double* compute_time_ptr, double* sync_time_ptr
#endif
, long* iter_counts_ptr
#ifdef PAPI
, long_long* restrict event_counts, int event_set, int num_events
#endif
);
//user kernel files
#include "initialize_variables_kernel_kernel.cu"
#include "zero_5d_array_kernel_kernel.cu"
#include "zero_1d_array_kernel_kernel.cu"
#include "calculate_cell_volumes_kernel.cu"
#include "dampen_ewt_kernel.cu"
#include "copy_double_kernel_kernel.cu"
#include "calculate_dt_kernel_kernel.cu"
#include "get_min_dt_kernel_kernel.cu"
#include "compute_step_factor_kernel_kernel.cu"
#include "compute_flux_edge_kernel_kernel.cu"
#include "compute_bnd_node_flux_kernel_kernel.cu"
#include "time_step_kernel_kernel.cu"
#include "indirect_rw_kernel_kernel.cu"
#include "residual_kernel_kernel.cu"
#include "calc_rms_kernel_kernel.cu"
#include "count_bad_vals_kernel.cu"
#include "up_pre_kernel_kernel.cu"
#include "up_kernel_kernel.cu"
#include "up_post_kernel_kernel.cu"
#include "down_v2_kernel_pre_kernel.cu"
#include "down_v2_kernel_kernel.cu"
#include "down_v2_kernel_post_kernel.cu"
#include "down_kernel_kernel.cu"
#include "identify_differences_kernel.cu"
#include "count_non_zeros_kernel.cu"
|
1a548d04b77ecbfc23ba7fcf8a71693f5768e676.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* PVM_single_c.cu
Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group
Copyright (C) 2005 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
[email protected]
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 5.0 (c) 2012, The University of
Oxford (the "Software")
The Software remains the property of the University of Oxford ("the
University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Isis
Innovation Limited ("Isis"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
[email protected] quoting reference DE/9564. */
#include "diffmodels_utils.h"
#include "levenberg_marquardt.cu"
#include "options.h"
#include <fstream>
/////////////////////////////////////
/////////////////////////////////////
/// PVM_single_c ///
/////////////////////////////////////
/////////////////////////////////////
__device__
inline float isoterm_PVM_single_c(const int pt,const float* _d,const float *bvals){
return exp(-bvals[pt]**_d);
}
__device__
inline float isoterm_lambda_PVM_single_c(const int pt,const float lambda,const float *bvals){
return(-2*bvals[pt]*lambda*exp(-bvals[pt]*lambda*lambda));
}
__device__
inline float anisoterm_PVM_single_c(const int pt,const float* _d,const float3 x, const float *bvecs, const float *bvals, const int ndirections){
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
return exp(-bvals[pt]**_d*dp*dp);
}
__device__
inline float anisoterm_lambda_PVM_single_c(const int pt,const float lambda,const float3 x, const float *bvecs, const float *bvals, const int ndirections){
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
return(-2*bvals[pt]*lambda*dp*dp*exp(-bvals[pt]*lambda*lambda*dp*dp));
}
__device__
inline float anisoterm_th_PVM_single_c(const int pt,const float* _d,const float3 x, const float _th,const float _ph,const float *bvecs, const float *bvals, const int ndirections){
float sinth,costh,sinph,cosph;
sincos(_th,&sinth,&costh);
sincos(_ph,&sinph,&cosph);
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
float dp1 = costh*(bvecs[pt]*cosph+bvecs[ndirections+pt]*sinph)-bvecs[(2*ndirections)+pt]*sinth;
return(-2*bvals[pt]**_d*dp*dp1*exp(-bvals[pt]**_d*dp*dp));
}
__device__
inline float anisoterm_ph_PVM_single_c(const int pt,const float* _d,const float3 x, const float _th,const float _ph,const float *bvecs, const float *bvals, const int ndirections){
float sinth,sinph,cosph;
sinth=sin(_th);
sincos(_ph,&sinph,&cosph);
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
float dp1 = sinth*(-bvecs[pt]*sinph+bvecs[ndirections+pt]*cosph);
return(-2*bvals[pt]**_d*dp*dp1*exp(-bvals[pt]**_d*dp*dp));
}
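// Signal model used by the cost/gradient/Hessian functions below (ball & sticks with a single diffusivity):
//   S(g,b) = S0 * ( (1 - sum_k f_k [- f0]) * exp(-b*d) + sum_k f_k * exp(-b*d*(g.x_k)^2) [+ f0] )
// where d = lambda2d_gpu(lambda) (lambda^2, judging by the *_lambda derivatives above), x_k is the unit vector
// defined by (th_k, ph_k) and f_k are the volume fractions; the helpers above are the iso/aniso terms of this
// sum and their partial derivatives with respect to lambda, th and ph.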
//If the sum of the fractions is >1, then zero as many fractions
//as necessary, so that the sum becomes smaller than 1.
//in diffmodel.cc
__device__ void fix_fsum_PVM_single_c( //INPUT
int nfib,
                                       //INPUT - OUTPUT
float *fs)
{
float sumf=0.0;
for(int i=0;i<nfib;i++){
sumf+=fs[i];
if(sumf>=1){
for(int j=i;j<nfib;j++)
fs[j]=FSMALL_gpu; //make the fraction almost zero
break;
}
}
}
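// e.g. fs = {0.5, 0.4, 0.3}: the running sum reaches 1 at index 2 (0.5+0.4+0.3 = 1.2 >= 1),
// so fs[2] (and any later fractions) are set to FSMALL_gpu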
//in diffmodel.cc
__device__ void sort_PVM_single_c(int nfib,float* params)
{
float temp_f, temp_th, temp_ph;
    // Order the (f, th, ph) triplets in descending order of their f parameters
for(int i=1; i<(nfib); i++){
for(int j=0; j<(nfib-i); j++){
if (params[2+j*3] < params[2+(j+1)*3]){
temp_f = params[2+j*3];
temp_th = params[2+j*3+1];
temp_ph = params[2+j*3+2];
params[2+j*3] = params[2+(j+1)*3];
params[2+j*3+1] = params[2+(j+1)*3+1];
params[2+j*3+2] = params[2+(j+1)*3+2];
params[2+(j+1)*3] = temp_f;
params[2+(j+1)*3+1] = temp_th;
params[2+(j+1)*3+2] = temp_ph;
}
}
}
}
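// fractions_deriv_PVM_single_c computes the Jacobian d f_j / d beta_k of the constrained fraction
// parameterisation f_k = beta2f(beta_k) * (1 - sum_{n<k} f_n) used in cf_PVM_single_c below;
// the sin(2*beta) and sin^2(beta) factors suggest beta2f(beta) = sin^2(beta), but beta2f_gpu itself is
// defined elsewhere (diffmodels_utils.h), so treat this note as a sketch of the idea rather than a spec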
__device__ void fractions_deriv_PVM_single_c( //INPUT
const float* params,
const float* fs,
const int nfib,
const int idSubVOX,
//OUTPUT
float* Deriv)
{
int nparams_per_fibre=3;
float fsum;
int k=idSubVOX%nfib;
for (int j=0; j<nfib; j++){
Deriv[j*nfib+k]=0;
}
int kk = 2+(k*nparams_per_fibre);
float sinparamkk = sin(2*params[kk]);
for (int j=0; j<nfib; j++){
int jj = 2+(j*nparams_per_fibre);
if (j==k){
fsum=1;
for (int n=0; n<=(j-1); n++){
fsum-=fs[n];
}
Deriv[j*nfib+k]=sinparamkk*fsum;
}else if (j>k){
float sinparam = sin(params[jj]);
fsum=0;
for (int n=0; n<=(j-1); n++){
fsum+=Deriv[n*nfib+k];
}
Deriv[j*nfib+k]= -(sinparam*sinparam)*fsum;
}
}
}
//cost function PVM_single_c
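// computes the sum-of-squares residual between the predicted signal (model above) and the measured data;
// each thread handles every THREADS_BLOCK_FIT-th diffusion direction and thread 0 reduces the partial sums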
__device__ void cf_PVM_single_c( //INPUT
const float* params,
const float* mdata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* reduction, //shared memory
float* fs, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
double* cfv)
{
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*cfv = 0.0;
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
}
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
float err;
float3 x2;
int dir_iter=idSubVOX;
__syncthreads();
reduction[idSubVOX]=0;
for(int dir=0;dir<ndir;dir++){
err = 0.0;
for(int k=0;k<nfib;k++){
x2.x=x[k*3];
x2.y=x[k*3+1];
x2.z=x[k*3+2];
err += fs[k]*anisoterm_PVM_single_c(dir_iter,_d,x2,bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
err= (params[0]*((temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_single_c(dir_iter,_d,bvals))+err))-mdata[dir_iter];
}else{
err = params[0]*((1-*sumf)*isoterm_PVM_single_c(dir_iter,_d,bvals)+err)-mdata[dir_iter];
}
reduction[idSubVOX]+= err*err;
dir_iter+=THREADS_BLOCK_FIT;
}
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
*cfv+=reduction[i];
}
}
}
//gradient function PVM_single_c
__device__ void grad_PVM_single_c( //INPUT
const float* params,
const float* mdata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
float* grad)
{
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
for (int p=0;p<nparams;p++) grad[p]=0;
}
__syncthreads();
if(idSubVOX<nfib){
fractions_deriv_PVM_single_c(params,fs,nfib,idSubVOX,f_deriv);
}
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
int max_dir = ndirections/THREADS_BLOCK_FIT;
if(ndirections%THREADS_BLOCK_FIT) max_dir++;
float* myJ = &J[idSubVOX*nparams];
float diff;
float sig;
float Iso_term;
float3 xx;
int dir_iter=idSubVOX;
//float Aniso_terms[MAXNFIBRES]; //reuse Shared J --- myJ[kk+1]
__syncthreads();
for(int dir=0;dir<max_dir;dir++){
for (int p=0; p<nparams; p++) myJ[p]=0;
if(dir<ndir){
for(int k=0;k<nfib;k++){
int kk = 2+3*(k) +1;
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
//Aniso_terms[k]=anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
myJ[kk] = anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
}
Iso_term=isoterm_PVM_single_c(dir_iter,_d,bvals); //Precompute some terms for this datapoint
sig = 0;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
sig += fs[k]*myJ[kk+1];//Aniso_terms[k];
myJ[1] += params[0]*fs[k]*anisoterm_lambda_PVM_single_c(dir_iter,params[1],xx,bvecs,bvals,ndirections);
myJ[kk] = 0;
for (int j=0;j<nfib;j++){
if(f_deriv[j*nfib+k]!=0){
//myJ[kk] += params[0]*(Aniso_terms[j]-Iso_term)*f_deriv[j*nfib+k];
myJ[kk] += params[0]*(myJ[2+j*3+1]-Iso_term)*f_deriv[j*nfib+k];
}
}
}
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
myJ[kk+1] = params[0]*fs[k]*anisoterm_th_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
myJ[kk+2] = params[0]*fs[k]*anisoterm_ph_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
//derivative with respect to f0
myJ[nparams-1]= params[0]*(1-Iso_term)*sin(float(2*params[nparams-1]))*partial_fsum;
sig=params[0]*((temp_f0+(1-*sumf-temp_f0)*Iso_term)+sig);
myJ[1] += params[0]*(1-*sumf-temp_f0)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}else{
sig = params[0]*((1-*sumf)*Iso_term+sig);
myJ[1] += params[0]*(1-*sumf)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}
diff = sig - mdata[dir_iter];
myJ[0] = sig/params[0];
}
for (int p=0;p<nparams;p++){
reduction[idSubVOX]=2*myJ[p]*diff;
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
grad[p] += reduction[i];
}
}
__syncthreads();
}
dir_iter+=THREADS_BLOCK_FIT;
}
}
//hessian function PVM_single_c
__device__ void hess_PVM_single_c( //INPUT
const float* params,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
float* hess)
{
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
for (int p=0;p<nparams;p++){
for (int p2=0;p2<nparams;p2++){
hess[p*nparams+p2] = 0;
}
}
}
__syncthreads();
if(idSubVOX<nfib){
fractions_deriv_PVM_single_c(params,fs,nfib,idSubVOX,f_deriv);
}
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
int max_dir = ndirections/THREADS_BLOCK_FIT;
if(ndirections%THREADS_BLOCK_FIT) max_dir++;
float* myJ = &J[idSubVOX*nparams];
float sig;
float Iso_term;
float3 xx;
int dir_iter=idSubVOX;
//float Aniso_terms[MAXNFIBRES]; //reuse Shared J --- myJ[kk+1]
__syncthreads();
for(int dir=0;dir<max_dir;dir++){
for (int p=0; p<nparams; p++) myJ[p]=0;
if(dir<ndir){
for(int k=0;k<nfib;k++){
int kk = 2+3*(k) +1;
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
//Aniso_terms[k]=anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
myJ[kk] = anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
}
Iso_term=isoterm_PVM_single_c(dir_iter,_d,bvals); //Precompute some terms for this datapoint
sig = 0;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
sig += fs[k]*myJ[kk+1];//Aniso_terms[k];
myJ[1] += params[0]*fs[k]*anisoterm_lambda_PVM_single_c(dir_iter,params[1],xx,bvecs,bvals,ndirections);
for (int j=0; j<nfib; j++){
if (f_deriv[j*nfib+k]!=0)
//myJ[kk] += params[0]*(Aniso_terms[j]-Iso_term)*f_deriv[j*nfib+k];
myJ[kk] += params[0]*(myJ[2+3*j+1]-Iso_term)*f_deriv[j*nfib+k];
}
}
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
myJ[kk+1] = params[0]*fs[k]*anisoterm_th_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
myJ[kk+2] = params[0]*fs[k]*anisoterm_ph_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
//derivative with respect to f0
myJ[nparams-1]= params[0]*(1-Iso_term)*sin(float(2*params[nparams-1]))*partial_fsum;
sig= params[0]*((temp_f0+(1-*sumf-temp_f0)*Iso_term)+sig);
myJ[1] += params[0]*(1-*sumf-temp_f0)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}else{
sig = params[0]*((1-*sumf)*Iso_term+sig);
myJ[1] += params[0]*(1-*sumf)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}
myJ[0] = sig/params[0];
}
for (int p=0;p<nparams;p++){
for (int p2=p;p2<nparams;p2++){
reduction[idSubVOX]=2*(myJ[p]*myJ[p2]);
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
hess[p*nparams+p2] += reduction[i];
}
}
__syncthreads();
}
}
dir_iter+=THREADS_BLOCK_FIT;
}
if(idSubVOX==0){
for (int j=0; j<nparams; j++) {
for (int i=j+1; i<nparams; i++) {
hess[i*nparams+j]=hess[j*nparams+i];
}
}
}
}
//in diffmodel.cc
extern "C" __global__ void fit_PVM_single_c_kernel( //INPUT
const float* data,
const float* bvecs,
const float* bvals,
const int nvox,
const int ndirections,
const int nfib,
const int nparams,
const bool m_eval_BIC,
const bool m_include_f0,
const bool m_return_fanning,
const bool gradnonlin,
//INPUT - OUTPUT
float* params)
{
int idSubVOX = threadIdx.x;
int idVOX = blockIdx.x;
int threadsBlock = blockDim.x;
////////// DYNAMIC SHARED MEMORY ///////////
extern __shared__ double shared[];
double* pcf = (double*) shared; //1
double* ncf = (double*) &pcf[1]; //1
double* lambda = (double*) &ncf[1]; //1
double* cftol = (double*) &lambda[1]; //1
double* ltol = (double*) &cftol[1]; //1
double* olambda = (double*) &ltol[1]; //1
float* J = (float*)&olambda[1]; //threadsBlock*nparams
float* reduction = (float*)&J[threadsBlock*nparams]; //threadsBlock
float* myparams = (float*) &reduction[threadsBlock]; //nparams
float* grad = (float*) &myparams[nparams]; //nparams
float* hess = (float*) &grad[nparams]; //nparams*nparams
float* step = (float*) &hess[nparams*nparams]; //nparams
float* inverse = (float*) &step[nparams]; //nparams
float* fs = (float*) &inverse[nparams]; //nfib
float* f_deriv = (float*) &fs[nfib]; //nfib*nfib
float* x = (float*) &f_deriv[nfib*nfib]; //nfib*3
float* _d = (float*) &x[nfib*3]; //1
float* sumf = (float*) &_d[1]; //1
float* C = (float*)&sumf[1]; //nparams*nparams;
float* el = (float*)&C[nparams*nparams]; //nparams
int* indx = (int*)&el[nparams]; //nparams
int* success = (int*) &indx[nparams]; //1
int* end = (int*) &success[1]; //1
////////// DYNAMIC SHARED MEMORY ///////////
if(idSubVOX<nparams){
myparams[idSubVOX]=params[(idVOX*nparams)+idSubVOX];
}
__syncthreads();
int pos_bvals, pos_bvecs;
if(gradnonlin){
pos_bvals=idVOX*ndirections;
pos_bvecs=idVOX*3*ndirections;
}else{
pos_bvals=0;
pos_bvecs=0;
}
//do the fit
levenberg_marquardt_PVM_single_c_gpu(&data[idVOX*ndirections],&bvecs[pos_bvecs],&bvals[pos_bvals],ndirections,nfib,nparams,m_include_f0,idSubVOX,step,grad,hess,inverse, pcf,ncf,lambda,cftol,ltol,olambda,success,end,J,reduction,fs,f_deriv,x,_d,sumf,C,el,indx,myparams);
__syncthreads();
// finalise parameters
// m_s0-myparams[0] m_d-myparams[1] m_f-m_th-m_ph-myparams[2,3,4,5, etc..] m_f0-myparams[nparams-1]
if(idSubVOX==0){
myparams[1] = lambda2d_gpu(myparams[1]);
for(int k=0;k<nfib;k++){
int kk = 2 + 3*(k);
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=myparams[2 + 3*j];
//////////////////////////
myparams[kk] = beta2f_gpu(myparams[kk])*partial_fsum;
}
if (m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++){
partial_fsum-=myparams[2 + 3*j];
}
//////////////////////////
myparams[nparams-1]= beta2f_gpu(myparams[nparams-1])*partial_fsum;
}
sort_PVM_single_c(nfib,myparams);
}
__syncthreads();
if(idSubVOX<nparams){
params[(idVOX*nparams)+idSubVOX] = myparams[idSubVOX];
}
}
//in diffmodel.cc
extern "C" __global__ void get_residuals_PVM_single_c_kernel( //INPUT
const float* data,
const float* params,
const float* bvecs,
const float* bvals,
const int nvox,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const bool gradnonlin,
const bool* includes_f0,
//OUTPUT
float* residuals)
{
int idSubVOX = threadIdx.x;
int idVOX = blockIdx.x;
int threadsBlock = blockDim.x;
////////// DYNAMIC SHARED MEMORY ///////////
extern __shared__ double shared[];
float* myparams = (float*) shared; //nparams
float* fs = (float*) &myparams[nparams]; //nfib
float* x = (float*) &fs[nfib]; //nfib*3
float* _d = (float*) &x[nfib*3]; //1
float* sumf = (float*) &_d[1]; //1
int* my_include_f0 = (int*) &sumf[1]; //1
////////// DYNAMIC SHARED MEMORY ///////////
float val;
float predicted_signal;
float mydata;
if(idSubVOX==0){
*my_include_f0 = includes_f0[idVOX];
//m_s0-myparams[0] m_d-myparams[1] m_f-m_th-m_ph-myparams[2,3,4,5 etc..] m_f0-myparams[nparams-1]
myparams[0]=params[(idVOX*nparams)+0];
if(myparams[1]<0) myparams[1] = 0; //This can be due to numerical errors..sqrt
else myparams[1] = d2lambda_gpu(params[(idVOX*nparams)+1]);
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*k;
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = params[(idVOX*nparams)+kk];
float tmpr=fs[k]/partial_fsum;
if (tmpr>1.0) tmpr=1; //This can be due to numerical errors
if (tmpr<0.0) tmpr=0; //This can be due to numerical errors..sqrt
myparams[kk] = f2beta_gpu(tmpr);
myparams[kk+1] = params[(idVOX*nparams)+kk+1];
myparams[kk+2] = params[(idVOX*nparams)+kk+2];
}
if (*my_include_f0){
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float tmpr=params[(idVOX*nparams)+nparams-1]/partial_fsum;
if (tmpr>1.0) tmpr=1; //This can be due to numerical errors..asin
if (tmpr<0.0) tmpr=0; //This can be due to numerical errors..sqrt
myparams[nparams-1]= f2beta_gpu(tmpr);
}
}
__syncthreads();
if(idSubVOX<nfib){
int kk = 2+3*idSubVOX;
float sinth,costh,sinph,cosph;
sincos(myparams[kk+1],&sinth,&costh);
sincos(myparams[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
if(idSubVOX==0){
float partial_fsum;
*sumf=0;
for(int k=0;k<nfib;k++){
int kk = 2+3*k;
////// partial_fsum //////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(myparams[kk])*partial_fsum;
*sumf += fs[k];
}
*_d = lambda2d_gpu(myparams[1]);
}
int ndir = ndirections/threadsBlock;
if(idSubVOX<(ndirections%threadsBlock)) ndir++;
float3 x2;
int dir_iter=idSubVOX;
__syncthreads();
int pos_bvals, pos_bvecs;
if(gradnonlin){
pos_bvals=idVOX*ndirections;
pos_bvecs=idVOX*3*ndirections;
}else{
pos_bvals=0;
pos_bvecs=0;
}
for(int dir=0;dir<ndir;dir++){
mydata = data[(idVOX*ndirections)+dir_iter];
predicted_signal=0; //pred = 0;
val = 0.0;
for(int k=0;k<nfib;k++){
x2.x=x[k*3];
x2.y=x[k*3+1];
x2.z=x[k*3+2];
val += fs[k]*anisoterm_PVM_single_c(dir_iter,_d,x2,&bvecs[pos_bvecs],&bvals[pos_bvals],ndirections);
}
if (*my_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0= beta2f_gpu(myparams[nparams-1])*partial_fsum;
predicted_signal = myparams[0]*(temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_single_c(dir_iter,_d,&bvals[pos_bvals])+val);
}else{
predicted_signal = myparams[0]*((1-*sumf)*isoterm_PVM_single_c(dir_iter,_d,&bvals[pos_bvals])+val);
}
//residuals=m_data-predicted_signal;
residuals[idVOX*ndirections+dir_iter]= mydata - predicted_signal;
dir_iter+=threadsBlock;
}
}
| 1a548d04b77ecbfc23ba7fcf8a71693f5768e676.cu | /* PVM_single_c.cu
Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group
Copyright (C) 2005 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
[email protected]
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 5.0 (c) 2012, The University of
Oxford (the "Software")
The Software remains the property of the University of Oxford ("the
University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Isis
Innovation Limited ("Isis"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
[email protected] quoting reference DE/9564. */
#include "diffmodels_utils.h"
#include "levenberg_marquardt.cu"
#include "options.h"
#include <fstream>
/////////////////////////////////////
/////////////////////////////////////
/// PVM_single_c ///
/////////////////////////////////////
/////////////////////////////////////
__device__
inline float isoterm_PVM_single_c(const int pt,const float* _d,const float *bvals){
return exp(-bvals[pt]**_d);
}
__device__
inline float isoterm_lambda_PVM_single_c(const int pt,const float lambda,const float *bvals){
return(-2*bvals[pt]*lambda*exp(-bvals[pt]*lambda*lambda));
}
__device__
inline float anisoterm_PVM_single_c(const int pt,const float* _d,const float3 x, const float *bvecs, const float *bvals, const int ndirections){
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
return exp(-bvals[pt]**_d*dp*dp);
}
__device__
inline float anisoterm_lambda_PVM_single_c(const int pt,const float lambda,const float3 x, const float *bvecs, const float *bvals, const int ndirections){
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
return(-2*bvals[pt]*lambda*dp*dp*exp(-bvals[pt]*lambda*lambda*dp*dp));
}
__device__
inline float anisoterm_th_PVM_single_c(const int pt,const float* _d,const float3 x, const float _th,const float _ph,const float *bvecs, const float *bvals, const int ndirections){
float sinth,costh,sinph,cosph;
sincos(_th,&sinth,&costh);
sincos(_ph,&sinph,&cosph);
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
float dp1 = costh*(bvecs[pt]*cosph+bvecs[ndirections+pt]*sinph)-bvecs[(2*ndirections)+pt]*sinth;
return(-2*bvals[pt]**_d*dp*dp1*exp(-bvals[pt]**_d*dp*dp));
}
__device__
inline float anisoterm_ph_PVM_single_c(const int pt,const float* _d,const float3 x, const float _th,const float _ph,const float *bvecs, const float *bvals, const int ndirections){
float sinth,sinph,cosph;
sinth=sin(_th);
sincos(_ph,&sinph,&cosph);
float dp = bvecs[pt]*x.x+bvecs[ndirections+pt]*x.y+bvecs[(2*ndirections)+pt]*x.z;
float dp1 = sinth*(-bvecs[pt]*sinph+bvecs[ndirections+pt]*cosph);
return(-2*bvals[pt]**_d*dp*dp1*exp(-bvals[pt]**_d*dp*dp));
}
//If the sum of the fractions is >1, then zero as many fractions
//as necessary, so that the sum becomes smaller than 1.
//in diffmodel.cc
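//Worked example with illustrative values: for nfib=3 and fs={0.6,0.5,0.2} the running sum
//reaches 1.1 at i=1, so fs[1] and fs[2] are reset to FSMALL_gpu and fs becomes
//{0.6,FSMALL_gpu,FSMALL_gpu}.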
__device__ void fix_fsum_PVM_single_c( //INPUT
int nfib,
//INPUT - OUTPUT
float *fs)
{
float sumf=0.0;
for(int i=0;i<nfib;i++){
sumf+=fs[i];
if(sumf>=1){
for(int j=i;j<nfib;j++)
fs[j]=FSMALL_gpu; //make the fraction almost zero
break;
}
}
}
//in diffmodel.cc
__device__ void sort_PVM_single_c(int nfib,float* params)
{
float temp_f, temp_th, temp_ph;
// Order vector descending using f parameters as index
for(int i=1; i<(nfib); i++){
for(int j=0; j<(nfib-i); j++){
if (params[2+j*3] < params[2+(j+1)*3]){
temp_f = params[2+j*3];
temp_th = params[2+j*3+1];
temp_ph = params[2+j*3+2];
params[2+j*3] = params[2+(j+1)*3];
params[2+j*3+1] = params[2+(j+1)*3+1];
params[2+j*3+2] = params[2+(j+1)*3+2];
params[2+(j+1)*3] = temp_f;
params[2+(j+1)*3+1] = temp_th;
params[2+(j+1)*3+2] = temp_ph;
}
}
}
}
__device__ void fractions_deriv_PVM_single_c( //INPUT
const float* params,
const float* fs,
const int nfib,
const int idSubVOX,
//OUTPUT
float* Deriv)
{
int nparams_per_fibre=3;
float fsum;
int k=idSubVOX%nfib;
for (int j=0; j<nfib; j++){
Deriv[j*nfib+k]=0;
}
int kk = 2+(k*nparams_per_fibre);
float sinparamkk = sin(2*params[kk]);
for (int j=0; j<nfib; j++){
int jj = 2+(j*nparams_per_fibre);
if (j==k){
fsum=1;
for (int n=0; n<=(j-1); n++){
fsum-=fs[n];
}
Deriv[j*nfib+k]=sinparamkk*fsum;
}else if (j>k){
float sinparam = sin(params[jj]);
fsum=0;
for (int n=0; n<=(j-1); n++){
fsum+=Deriv[n*nfib+k];
}
Deriv[j*nfib+k]= -(sinparam*sinparam)*fsum;
}
}
}
//cost function PVM_single_c
__device__ void cf_PVM_single_c( //INPUT
const float* params,
const float* mdata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* reduction, //shared memory
float* fs, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
double* cfv)
{
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*cfv = 0.0;
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
}
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
float err;
float3 x2;
int dir_iter=idSubVOX;
__syncthreads();
reduction[idSubVOX]=0;
for(int dir=0;dir<ndir;dir++){
err = 0.0;
for(int k=0;k<nfib;k++){
x2.x=x[k*3];
x2.y=x[k*3+1];
x2.z=x[k*3+2];
err += fs[k]*anisoterm_PVM_single_c(dir_iter,_d,x2,bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
err= (params[0]*((temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_single_c(dir_iter,_d,bvals))+err))-mdata[dir_iter];
}else{
err = params[0]*((1-*sumf)*isoterm_PVM_single_c(dir_iter,_d,bvals)+err)-mdata[dir_iter];
}
reduction[idSubVOX]+= err*err;
dir_iter+=THREADS_BLOCK_FIT;
}
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
*cfv+=reduction[i];
}
}
}
//gradient function PVM_single_c
__device__ void grad_PVM_single_c( //INPUT
const float* params,
const float* mdata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
float* grad)
{
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
for (int p=0;p<nparams;p++) grad[p]=0;
}
__syncthreads();
if(idSubVOX<nfib){
fractions_deriv_PVM_single_c(params,fs,nfib,idSubVOX,f_deriv);
}
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
int max_dir = ndirections/THREADS_BLOCK_FIT;
if(ndirections%THREADS_BLOCK_FIT) max_dir++;
float* myJ = &J[idSubVOX*nparams];
float diff;
float sig;
float Iso_term;
float3 xx;
int dir_iter=idSubVOX;
//float Aniso_terms[MAXNFIBRES]; //reuse Shared J --- myJ[kk+1]
__syncthreads();
for(int dir=0;dir<max_dir;dir++){
for (int p=0; p<nparams; p++) myJ[p]=0;
if(dir<ndir){
for(int k=0;k<nfib;k++){
int kk = 2+3*(k) +1;
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
//Aniso_terms[k]=anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
myJ[kk] = anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
}
Iso_term=isoterm_PVM_single_c(dir_iter,_d,bvals); //Precompute some terms for this datapoint
sig = 0;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
sig += fs[k]*myJ[kk+1];//Aniso_terms[k];
myJ[1] += params[0]*fs[k]*anisoterm_lambda_PVM_single_c(dir_iter,params[1],xx,bvecs,bvals,ndirections);
myJ[kk] = 0;
for (int j=0;j<nfib;j++){
if(f_deriv[j*nfib+k]!=0){
//myJ[kk] += params[0]*(Aniso_terms[j]-Iso_term)*f_deriv[j*nfib+k];
myJ[kk] += params[0]*(myJ[2+j*3+1]-Iso_term)*f_deriv[j*nfib+k];
}
}
}
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
myJ[kk+1] = params[0]*fs[k]*anisoterm_th_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
myJ[kk+2] = params[0]*fs[k]*anisoterm_ph_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
//derivative with respect to f0
myJ[nparams-1]= params[0]*(1-Iso_term)*sin(float(2*params[nparams-1]))*partial_fsum;
sig=params[0]*((temp_f0+(1-*sumf-temp_f0)*Iso_term)+sig);
myJ[1] += params[0]*(1-*sumf-temp_f0)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}else{
sig = params[0]*((1-*sumf)*Iso_term+sig);
myJ[1] += params[0]*(1-*sumf)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}
diff = sig - mdata[dir_iter];
myJ[0] = sig/params[0];
}
for (int p=0;p<nparams;p++){
reduction[idSubVOX]=2*myJ[p]*diff;
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
grad[p] += reduction[i];
}
}
__syncthreads();
}
dir_iter+=THREADS_BLOCK_FIT;
}
}
//hessian function PVM_single_c
__device__ void hess_PVM_single_c( //INPUT
const float* params,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
//OUTPUT
float* hess)
{
if(idSubVOX<nfib){
int kk = 2+3*(idSubVOX);
float sinth,costh,sinph,cosph;
sincos(params[kk+1],&sinth,&costh);
sincos(params[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
if(idSubVOX==0){
*_d = lambda2d_gpu(params[1]);
*sumf=0;
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(params[kk])*partial_fsum;
*sumf += fs[k];
}
for (int p=0;p<nparams;p++){
for (int p2=0;p2<nparams;p2++){
hess[p*nparams+p2] = 0;
}
}
}
__syncthreads();
if(idSubVOX<nfib){
fractions_deriv_PVM_single_c(params,fs,nfib,idSubVOX,f_deriv);
}
int ndir = ndirections/THREADS_BLOCK_FIT;
if(idSubVOX<(ndirections%THREADS_BLOCK_FIT)) ndir++;
int max_dir = ndirections/THREADS_BLOCK_FIT;
if(ndirections%THREADS_BLOCK_FIT) max_dir++;
float* myJ = &J[idSubVOX*nparams];
float sig;
float Iso_term;
float3 xx;
int dir_iter=idSubVOX;
//float Aniso_terms[MAXNFIBRES]; //reuse Shared J --- myJ[kk+1]
__syncthreads();
for(int dir=0;dir<max_dir;dir++){
for (int p=0; p<nparams; p++) myJ[p]=0;
if(dir<ndir){
for(int k=0;k<nfib;k++){
int kk = 2+3*(k) +1;
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
//Aniso_terms[k]=anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
myJ[kk] = anisoterm_PVM_single_c(dir_iter,_d,xx,bvecs,bvals,ndirections);
}
Iso_term=isoterm_PVM_single_c(dir_iter,_d,bvals); //Precompute some terms for this datapoint
sig = 0;
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
sig += fs[k]*myJ[kk+1];//Aniso_terms[k];
myJ[1] += params[0]*fs[k]*anisoterm_lambda_PVM_single_c(dir_iter,params[1],xx,bvecs,bvals,ndirections);
for (int j=0; j<nfib; j++){
if (f_deriv[j*nfib+k]!=0)
//myJ[kk] += params[0]*(Aniso_terms[j]-Iso_term)*f_deriv[j*nfib+k];
myJ[kk] += params[0]*(myJ[2+3*j+1]-Iso_term)*f_deriv[j*nfib+k];
}
}
for(int k=0;k<nfib;k++){
int kk = 2+3*(k);
xx.x=x[k*3];
xx.y=x[k*3+1];
xx.z=x[k*3+2];
myJ[kk+1] = params[0]*fs[k]*anisoterm_th_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
myJ[kk+2] = params[0]*fs[k]*anisoterm_ph_PVM_single_c(dir_iter,_d,xx,params[kk+1],params[kk+2],bvecs,bvals,ndirections);
}
if(m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0=beta2f_gpu(params[nparams-1])*partial_fsum;
//derivative with respect to f0
myJ[nparams-1]= params[0]*(1-Iso_term)*sin(float(2*params[nparams-1]))*partial_fsum;
sig= params[0]*((temp_f0+(1-*sumf-temp_f0)*Iso_term)+sig);
myJ[1] += params[0]*(1-*sumf-temp_f0)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}else{
sig = params[0]*((1-*sumf)*Iso_term+sig);
myJ[1] += params[0]*(1-*sumf)*isoterm_lambda_PVM_single_c(dir_iter,params[1],bvals);
}
myJ[0] = sig/params[0];
}
for (int p=0;p<nparams;p++){
for (int p2=p;p2<nparams;p2++){
reduction[idSubVOX]=2*(myJ[p]*myJ[p2]);
__syncthreads();
if(idSubVOX==0){
for(int i=0;i<THREADS_BLOCK_FIT;i++){
hess[p*nparams+p2] += reduction[i];
}
}
__syncthreads();
}
}
dir_iter+=THREADS_BLOCK_FIT;
}
if(idSubVOX==0){
for (int j=0; j<nparams; j++) {
for (int i=j+1; i<nparams; i++) {
hess[i*nparams+j]=hess[j*nparams+i];
}
}
}
}
//in diffmodel.cc
extern "C" __global__ void fit_PVM_single_c_kernel( //INPUT
const float* data,
const float* bvecs,
const float* bvals,
const int nvox,
const int ndirections,
const int nfib,
const int nparams,
const bool m_eval_BIC,
const bool m_include_f0,
const bool m_return_fanning,
const bool gradnonlin,
//INPUT - OUTPUT
float* params)
{
int idSubVOX = threadIdx.x;
int idVOX = blockIdx.x;
int threadsBlock = blockDim.x;
////////// DYNAMIC SHARED MEMORY ///////////
extern __shared__ double shared[];
double* pcf = (double*) shared; //1
double* ncf = (double*) &pcf[1]; //1
double* lambda = (double*) &ncf[1]; //1
double* cftol = (double*) &lambda[1]; //1
double* ltol = (double*) &cftol[1]; //1
double* olambda = (double*) &ltol[1]; //1
float* J = (float*)&olambda[1]; //threadsBlock*nparams
float* reduction = (float*)&J[threadsBlock*nparams]; //threadsBlock
float* myparams = (float*) &reduction[threadsBlock]; //nparams
float* grad = (float*) &myparams[nparams]; //nparams
float* hess = (float*) &grad[nparams]; //nparams*nparams
float* step = (float*) &hess[nparams*nparams]; //nparams
float* inverse = (float*) &step[nparams]; //nparams
float* fs = (float*) &inverse[nparams]; //nfib
float* f_deriv = (float*) &fs[nfib]; //nfib*nfib
float* x = (float*) &f_deriv[nfib*nfib]; //nfib*3
float* _d = (float*) &x[nfib*3]; //1
float* sumf = (float*) &_d[1]; //1
float* C = (float*)&sumf[1]; //nparams*nparams;
float* el = (float*)&C[nparams*nparams]; //nparams
int* indx = (int*)&el[nparams]; //nparams
int* success = (int*) &indx[nparams]; //1
int* end = (int*) &success[1]; //1
////////// DYNAMIC SHARED MEMORY ///////////
if(idSubVOX<nparams){
myparams[idSubVOX]=params[(idVOX*nparams)+idSubVOX];
}
__syncthreads();
int pos_bvals, pos_bvecs;
if(gradnonlin){
pos_bvals=idVOX*ndirections;
pos_bvecs=idVOX*3*ndirections;
}else{
pos_bvals=0;
pos_bvecs=0;
}
//do the fit
levenberg_marquardt_PVM_single_c_gpu(&data[idVOX*ndirections],&bvecs[pos_bvecs],&bvals[pos_bvals],ndirections,nfib,nparams,m_include_f0,idSubVOX,step,grad,hess,inverse, pcf,ncf,lambda,cftol,ltol,olambda,success,end,J,reduction,fs,f_deriv,x,_d,sumf,C,el,indx,myparams);
__syncthreads();
// finalise parameters
// m_s0-myparams[0] m_d-myparams[1] m_f-m_th-m_ph-myparams[2,3,4,5, etc..] m_f0-myparams[nparams-1]
if(idSubVOX==0){
myparams[1] = lambda2d_gpu(myparams[1]);
for(int k=0;k<nfib;k++){
int kk = 2 + 3*(k);
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=myparams[2 + 3*j];
//////////////////////////
myparams[kk] = beta2f_gpu(myparams[kk])*partial_fsum;
}
if (m_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<(nfib);j++){
partial_fsum-=myparams[2 + 3*j];
}
//////////////////////////
myparams[nparams-1]= beta2f_gpu(myparams[nparams-1])*partial_fsum;
}
sort_PVM_single_c(nfib,myparams);
}
__syncthreads();
if(idSubVOX<nparams){
params[(idVOX*nparams)+idSubVOX] = myparams[idSubVOX];
}
}
//in diffmodel.cc
extern "C" __global__ void get_residuals_PVM_single_c_kernel( //INPUT
const float* data,
const float* params,
const float* bvecs,
const float* bvals,
const int nvox,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const bool gradnonlin,
const bool* includes_f0,
//OUTPUT
float* residuals)
{
int idSubVOX = threadIdx.x;
int idVOX = blockIdx.x;
int threadsBlock = blockDim.x;
////////// DYNAMIC SHARED MEMORY ///////////
extern __shared__ double shared[];
float* myparams = (float*) shared; //nparams
float* fs = (float*) &myparams[nparams]; //nfib
float* x = (float*) &fs[nfib]; //nfib*3
float* _d = (float*) &x[nfib*3]; //1
float* sumf = (float*) &_d[1]; //1
int* my_include_f0 = (int*) &sumf[1]; //1
////////// DYNAMIC SHARED MEMORY ///////////
float val;
float predicted_signal;
float mydata;
if(idSubVOX==0){
*my_include_f0 = includes_f0[idVOX];
//m_s0-myparams[0] m_d-myparams[1] m_f-m_th-m_ph-myparams[2,3,4,5 etc..] m_f0-myparams[nparams-1]
myparams[0]=params[(idVOX*nparams)+0];
if(myparams[1]<0) myparams[1] = 0; //This can be due to numerical errors..sqrt
else myparams[1] = d2lambda_gpu(params[(idVOX*nparams)+1]);
float partial_fsum;
for(int k=0;k<nfib;k++){
int kk = 2+3*k;
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = params[(idVOX*nparams)+kk];
float tmpr=fs[k]/partial_fsum;
if (tmpr>1.0) tmpr=1; //This can be due to numerical errors
if (tmpr<0.0) tmpr=0; //This can be due to numerical errors..sqrt
myparams[kk] = f2beta_gpu(tmpr);
myparams[kk+1] = params[(idVOX*nparams)+kk+1];
myparams[kk+2] = params[(idVOX*nparams)+kk+2];
}
if (*my_include_f0){
//partial_fsum ///////////
partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float tmpr=params[(idVOX*nparams)+nparams-1]/partial_fsum;
if (tmpr>1.0) tmpr=1; //This can be due to numerical errors..asin
if (tmpr<0.0) tmpr=0; //This can be due to numerical errors..sqrt
myparams[nparams-1]= f2beta_gpu(tmpr);
}
}
__syncthreads();
if(idSubVOX<nfib){
int kk = 2+3*idSubVOX;
float sinth,costh,sinph,cosph;
sincos(myparams[kk+1],&sinth,&costh);
sincos(myparams[kk+2],&sinph,&cosph);
x[idSubVOX*3] = sinth*cosph;
x[idSubVOX*3+1] = sinth*sinph;
x[idSubVOX*3+2] = costh;
}
if(idSubVOX==0){
float partial_fsum;
*sumf=0;
for(int k=0;k<nfib;k++){
int kk = 2+3*k;
////// partial_fsum //////
partial_fsum=1.0;
for(int j=0;j<k;j++)
partial_fsum-=fs[j];
//////////////////////////
fs[k] = beta2f_gpu(myparams[kk])*partial_fsum;
*sumf += fs[k];
}
*_d = lambda2d_gpu(myparams[1]);
}
int ndir = ndirections/threadsBlock;
if(idSubVOX<(ndirections%threadsBlock)) ndir++;
float3 x2;
int dir_iter=idSubVOX;
__syncthreads();
int pos_bvals, pos_bvecs;
if(gradnonlin){
pos_bvals=idVOX*ndirections;
pos_bvecs=idVOX*3*ndirections;
}else{
pos_bvals=0;
pos_bvecs=0;
}
for(int dir=0;dir<ndir;dir++){
mydata = data[(idVOX*ndirections)+dir_iter];
predicted_signal=0; //pred = 0;
val = 0.0;
for(int k=0;k<nfib;k++){
x2.x=x[k*3];
x2.y=x[k*3+1];
x2.z=x[k*3+2];
val += fs[k]*anisoterm_PVM_single_c(dir_iter,_d,x2,&bvecs[pos_bvecs],&bvals[pos_bvals],ndirections);
}
if (*my_include_f0){
//partial_fsum ///////////
float partial_fsum=1.0;
for(int j=0;j<nfib;j++)
partial_fsum-=fs[j];
//////////////////////////
float temp_f0= beta2f_gpu(myparams[nparams-1])*partial_fsum;
predicted_signal = myparams[0]*(temp_f0+(1-*sumf-temp_f0)*isoterm_PVM_single_c(dir_iter,_d,&bvals[pos_bvals])+val);
}else{
predicted_signal = myparams[0]*((1-*sumf)*isoterm_PVM_single_c(dir_iter,_d,&bvals[pos_bvals])+val);
}
//residuals=m_data-predicted_signal;
residuals[idVOX*ndirections+dir_iter]= mydata - predicted_signal;
dir_iter+=threadsBlock;
}
}
|
247d22e0fb81cd7eca19a2f8826e68ca436a825c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h> //malloc, srand
#include <time.h> //time(NULL) used to seed the RNG
__global__ void reduceKernel(int *input, int *output, int N)
{
int tid = threadIdx.x;
int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
extern __shared__ int sdata[];
sdata[tid] = 0;
//perform first level of reduction, reading from global memory, writing to shared memory
int sum = (i < N) ? input[i] : 0;
if (i + blockDim.x < N) sum += input[i+blockDim.x];
sdata[tid] = sum;
//synchronise threads in this block before manipulating with the data
__syncthreads();
//do reduction in shared memory
for (int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
//write result for this block to global mem
if(tid == 0) output[blockIdx.x] = sdata[0];
}
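//nextPow2 rounds x up to the next power of two (e.g. 5 -> 8, 8 stays 8);
//it is used below to size the thread blocks for the partial-sum passes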
int nextPow2(int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
int main(int argc, char **argv)
{
//number of elements in the array
int N = 4000000;
//set the number of threads
int maxThreads = 128;
//grid and block sizes
int threads = (N < maxThreads*2) ? nextPow2((N + 1)/ 2) : maxThreads;
int blocks = (N + (threads * 2 - 1)) / (threads * 2);
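//each thread sums two input elements while loading into shared memory,
//so one block covers threads*2 elements (hence the divisions by threads*2 above)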
dim3 grid(blocks, 1, 1);
dim3 block(threads, 1, 1);
//print the number of elements
printf("\n======================\n");
printf("Parallel reduction sum\n");
printf("======================\n\n");
printf("Total number of elements to sum: %i\n", N);
printf("Kernel launch configuration: %i blocks of %i threads\n", grid.x, block.x);
//host memory pointer
int *data_h;
//allocate host memory
data_h = (int*)malloc(N*sizeof(int));
//initialise random number generator seed based on current time
srand(time(NULL));
//generate data
for (int i=0; i<N; i++) data_h[i] = 1;
//device memory pointers
int *data_d;
int *blockSum_d;
//allocate device memory
hipMalloc((void **)&data_d, N * sizeof(int));
hipMalloc((void **)&blockSum_d, grid.x * sizeof(int));
//copy memory to device
hipMemcpy(data_d, data_h, N * sizeof(int), hipMemcpyHostToDevice);
//calculate sums on device
float timeGPU;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//level 0
printf("Level 0 kernel summing %i elements with %i blocks of %i threads...\n", N, grid.x, block.x);
hipLaunchKernelGGL(( reduceKernel), dim3(grid), dim3(block), block.x*sizeof(int), 0, data_d, blockSum_d, N);
//level 1+
int remainingElements = grid.x;
int level = 1;
while(remainingElements > 1)
{
threads = (remainingElements < maxThreads*2) ? nextPow2((remainingElements + 1)/ 2) : maxThreads;
blocks = (remainingElements + (threads * 2 - 1)) / (threads * 2);
printf("Level %i kernel summing %i elements with %i blocks of %i threads...\n", level, remainingElements, blocks, threads);
hipLaunchKernelGGL(( reduceKernel), dim3(blocks), dim3(threads), threads*sizeof(int), 0, blockSum_d, blockSum_d, remainingElements);
remainingElements = blocks;
level++;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timeGPU, start, stop);
//copy results back to host
int sumGPU;
hipMemcpy(&sumGPU, blockSum_d, sizeof(int), hipMemcpyDeviceToHost);
//print result
printf("result: %i time: %f ms throughput: %.4f GB/s\n", sumGPU, timeGPU, 1.0e-9 * ((double)N*sizeof(int))/(timeGPU/1000));
//hipDeviceReset must be called before exiting in order for profiling and tracing tools such as Nsight and Visual Profiler to show complete traces
hipError_t cudaStatus;
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
else return 0;
} | 247d22e0fb81cd7eca19a2f8826e68ca436a825c.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h> //malloc, srand
#include <time.h> //time(NULL) used to seed the RNG
__global__ void reduceKernel(int *input, int *output, int N)
{
int tid = threadIdx.x;
int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
extern __shared__ int sdata[];
sdata[tid] = 0;
//perform first level of reduction, reading from global memory, writing to shared memory
int sum = (i < N) ? input[i] : 0;
if (i + blockDim.x < N) sum += input[i+blockDim.x];
sdata[tid] = sum;
//synchronise threads in this block before manipulating with the data
__syncthreads();
//do reduction in shared memory
for (int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
//write result for this block to global mem
if(tid == 0) output[blockIdx.x] = sdata[0];
}
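//nextPow2 rounds x up to the next power of two (e.g. 5 -> 8, 8 stays 8);
//it is used below to size the thread blocks for the partial-sum passes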
int nextPow2(int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
int main(int argc, char **argv)
{
//number of elements in the array
int N = 4000000;
//set the number of threads
int maxThreads = 128;
//grid and block sizes
int threads = (N < maxThreads*2) ? nextPow2((N + 1)/ 2) : maxThreads;
int blocks = (N + (threads * 2 - 1)) / (threads * 2);
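//each thread sums two input elements while loading into shared memory,
//so one block covers threads*2 elements (hence the divisions by threads*2 above)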
dim3 grid(blocks, 1, 1);
dim3 block(threads, 1, 1);
//print the number of elements
printf("\n======================\n");
printf("Parallel reduction sum\n");
printf("======================\n\n");
printf("Total number of elements to sum: %i\n", N);
printf("Kernel launch configuration: %i blocks of %i threads\n", grid.x, block.x);
//host memory pointer
int *data_h;
//allocate host memory
data_h = (int*)malloc(N*sizeof(int));
//initialise random number generator seed based on current time
srand(time(NULL));
//generate data
for (int i=0; i<N; i++) data_h[i] = 1;
//device memory pointers
int *data_d;
int *blockSum_d;
//allocate device memory
cudaMalloc((void **)&data_d, N * sizeof(int));
cudaMalloc((void **)&blockSum_d, grid.x * sizeof(int));
//copy memory to device
cudaMemcpy(data_d, data_h, N * sizeof(int), cudaMemcpyHostToDevice);
//calculate sums on device
float timeGPU;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//level 0
printf("Level 0 kernel summing %i elements with %i blocks of %i threads...\n", N, grid.x, block.x);
reduceKernel<<<grid, block, block.x*sizeof(int)>>>(data_d, blockSum_d, N);
//level 1+
int remainingElements = grid.x;
int level = 1;
while(remainingElements > 1)
{
threads = (remainingElements < maxThreads*2) ? nextPow2((remainingElements + 1)/ 2) : maxThreads;
blocks = (remainingElements + (threads * 2 - 1)) / (threads * 2);
printf("Level %i kernel summing %i elements with %i blocks of %i threads...\n", level, remainingElements, blocks, threads);
reduceKernel<<<blocks, threads, threads*sizeof(int)>>>(blockSum_d, blockSum_d, remainingElements);
remainingElements = blocks;
level++;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeGPU, start, stop);
//copy results back to host
int sumGPU;
cudaMemcpy(&sumGPU, blockSum_d, sizeof(int), cudaMemcpyDeviceToHost);
//print result
printf("result: %i time: %f ms throughput: %.4f GB/s\n", sumGPU, timeGPU, 1.0e-9 * ((double)N*sizeof(int))/(timeGPU/1000));
//cudaDeviceReset must be called before exiting in order for profiling and tracing tools such as Nsight and Visual Profiler to show complete traces
cudaError_t cudaStatus;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
else return 0;
} |
5b6ad9542964b4b4c0f1417dab04efc657c02a66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/util/mm_func.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe
{
const int BLOCK_SIZE = 32;
template <typename Dtype>
void __global__ matrix_multiply_kernel(
const int M, const int N, const int K,
const Dtype* A, const Dtype* B, Dtype* C)
{
int br = blockIdx.y, bc = blockIdx.x;
int tr = threadIdx.y, tc = threadIdx.x;
int Cr = br * BLOCK_SIZE + tr;
int Cc = bc * BLOCK_SIZE + tc;
Dtype s = 0;
int BN = (K + BLOCK_SIZE - 1) / BLOCK_SIZE;
for (int i = 0; i < BN; ++i) {
__shared__ float a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float b[BLOCK_SIZE][BLOCK_SIZE];
int Ar = Cr, Ac = i * BLOCK_SIZE + tc;
if (Ar < M && Ac < K)
a[tr][tc] = A[Ar * K + Ac];
else
a[tr][tc] = 0;
int Br = i * BLOCK_SIZE + tr, Bc = Cc;
if (Br < K && Bc < N)
b[tr][tc] = B[Br * N + Bc];
else
b[tr][tc] = 0;
__syncthreads();
for (int j = 0; j < BLOCK_SIZE; ++j)
s += a[tr][j] * b[j][tc];
__syncthreads();
}
if (Cr < M && Cc < N)
C[Cr * N + Cc] = s;
}
template <typename Dtype>
void matrix_multiply(const int M, const int N, const int K,
const Dtype* A, const Dtype* B, Dtype* C)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE); // ceil-div so partial edge tiles are launched; the kernel already bounds-checks them
hipLaunchKernelGGL(( matrix_multiply_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, M, N, K, A, B, C);
}
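// Usage sketch (names are illustrative): matrix_multiply(M, N, K, d_A, d_B, d_C)
// computes row-major C[M x N] = A[M x K] * B[K x N] on the current device, where
// d_A, d_B and d_C are device pointers; explicit instantiations for float and
// double follow below.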
template
void matrix_multiply<float>(
const int M, const int N, const int K,
const float* A, const float* B, float* C);
template
void matrix_multiply<double>(
const int M, const int N, const int K,
const double* A, const double* B, double* C);
}
| 5b6ad9542964b4b4c0f1417dab04efc657c02a66.cu | #include "caffe/util/mm_func.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe
{
const int BLOCK_SIZE = 32;
template <typename Dtype>
void __global__ matrix_multiply_kernel(
const int M, const int N, const int K,
const Dtype* A, const Dtype* B, Dtype* C)
{
int br = blockIdx.y, bc = blockIdx.x;
int tr = threadIdx.y, tc = threadIdx.x;
int Cr = br * BLOCK_SIZE + tr;
int Cc = bc * BLOCK_SIZE + tc;
Dtype s = 0;
int BN = (K + BLOCK_SIZE - 1) / BLOCK_SIZE;
for (int i = 0; i < BN; ++i) {
__shared__ float a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float b[BLOCK_SIZE][BLOCK_SIZE];
int Ar = Cr, Ac = i * BLOCK_SIZE + tc;
if (Ar < M && Ac < K)
a[tr][tc] = A[Ar * K + Ac];
else
a[tr][tc] = 0;
int Br = i * BLOCK_SIZE + tr, Bc = Cc;
if (Br < K && Bc < N)
b[tr][tc] = B[Br * N + Bc];
else
b[tr][tc] = 0;
__syncthreads();
for (int j = 0; j < BLOCK_SIZE; ++j)
s += a[tr][j] * b[j][tc];
__syncthreads();
}
if (Cr < M && Cc < N)
C[Cr * N + Cc] = s;
}
template <typename Dtype>
void matrix_multiply(const int M, const int N, const int K,
const Dtype* A, const Dtype* B, Dtype* C)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE); // ceil-div so partial edge tiles are launched; the kernel already bounds-checks them
matrix_multiply_kernel<<<dimGrid, dimBlock>>>(M, N, K, A, B, C);
}
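// Usage sketch (names are illustrative): matrix_multiply(M, N, K, d_A, d_B, d_C)
// computes row-major C[M x N] = A[M x K] * B[K x N] on the current device, where
// d_A, d_B and d_C are device pointers; explicit instantiations for float and
// double follow below.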
template
void matrix_multiply<float>(
const int M, const int N, const int K,
const float* A, const float* B, float* C);
template
void matrix_multiply<double>(
const int M, const int N, const int K,
const double* A, const double* B, double* C);
}
|
f68840bee16c9ea57b50c10b6a3fce52fa672598.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
#include <type_traits>
template <typename V>
__global__ void __launch_bounds__(128) gemm_32x32x32_TN_vec4(float* U, const V* __restrict__ X, const V* __restrict__ E, uint C, uint K, uint N, uint C16, uint K16, uint inc_n, uint inc_c, uint inc_k)
{
__shared__ float shrU[32*32*2 + 16*4];
uint tid = threadIdx.x;
uint idx_C = blockIdx.y;
uint idx_K = blockIdx.x;
uint idx_N = blockIdx.z;
uint tx = tid & 7;
uint ty = tid >> 3;
uint n = idx_N*32 + ty;
// global offsets in vector units
uint c = idx_C*8 + tx;
uint k = idx_K*8 + tx;
uint offsetC = n*C + c;
uint offsetK = n*K + k;
bool bc = c < C;
bool bk = k < K;
// shared offsets in bytes
// When reading, each warp works on its own 8 rows.
// These groups of 8 are added together at end.
uint writeS = (ty*32 + tx*4) * 4;
uint row8 = (tid & 96) * 32;
uint readCs = row8 + (((tid & 16) >> 3) | (tid & 1)) * 16;
uint readKs = row8 + ((tid >> 1) & 7) * 16;
// This keeps all prior logic outside of the loops.
asm("mov.b32 %0, %0;" : "+r"(writeS) : );
asm("mov.b32 %0, %0;" : "+r"(offsetC) : );
asm("mov.b32 %0, %0;" : "+r"(offsetK) : );
asm("mov.b32 %0, %0;" : "+r"(readCs) : );
asm("mov.b32 %0, %0;" : "+r"(readKs) : );
// zero 32 accumulation registers
float regU[8][4]; // [c][k]
for (int c = 0; c < 8; c++)
for (int k = 0; k < 4; k++)
regU[c][k] = 0;
// assume a minimum of one loop
#pragma unroll 1
do
{
V c00, c16;
V k00, k16;
ew_zero(c00); ew_zero(c16);
ew_zero(k00); ew_zero(k16);
const V* X00 = add_ptr_u(X, offsetC + 0);
const V* X16 = add_ptr_u(X, offsetC + C16);
const V* E00 = add_ptr_u(E, offsetK + 0);
const V* E16 = add_ptr_u(E, offsetK + K16);
if (bc)
{
c00 = ldg(X00);
c16 = ldg(X16);
}
if (bk)
{
k00 = ldg(E00);
k16 = ldg(E16);
}
offsetC += inc_c;
offsetK += inc_k;
n += inc_n;
__syncthreads();
st_shared_v4(writeS + ( 0*32 + 0*16*32)*4, to_float(c00));
st_shared_v4(writeS + ( 0*32 + 1*16*32)*4, to_float(c16));
st_shared_v4(writeS + (32*32 + 0*16*32)*4, to_float(k00));
st_shared_v4(writeS + (32*32 + 1*16*32)*4, to_float(k16));
__syncthreads();
float regC[8], regK[4];
#pragma unroll
for (int j = 0; j < 8; j++)
{
// fetch outer product data
ld_shared_v4(readCs + ( 0*32 + 32*j + 0)*4, &regC[0] );
ld_shared_v4(readCs + ( 0*32 + 32*j + 16)*4, &regC[4] );
ld_shared_v4(readKs + (32*32 + 32*j + 0)*4, regK );
// compute outer product
for (int c = 0; c < 8; c++)
for (int k = 0; k < 4; k++)
regU[c][k] += regC[c] * regK[k];
}
} while (n < N);
// conserve registers by forcing a reload of these
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_K) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_C) :);
// Arrange 4 tiles horizontally in the X direction: ((tid & 96) >> 2)
// Add some spacing to avoid write bank conflicts: (tidY << 2)
int tidY = ((tid & 16) >> 3) | (tid & 1);
int tidX = ((tid >> 1) & 7) + ((tid & 96) >> 2) + (tidY << 2);
float4* storU4 = (float4*)&shrU[tidY*32*4*4 + tidX*4];
__syncthreads();
storU4[0*8*4] = *(float4*)regU[0];
storU4[1*8*4] = *(float4*)regU[1];
storU4[2*8*4] = *(float4*)regU[2];
storU4[3*8*4] = *(float4*)regU[3];
__syncthreads();
// leaving vector math
uint tid31 = tid & 31;
uint tid32 = tid >> 5;
C *= 4;
K *= 4;
float* readU = &shrU[tid32*32*4 + tid31];
float u[4][4];
for (int j = 0; j < 4; j++)
for (int i = 0; i < 4; i++)
u[j][i] = readU[j*32*4*4 + j*16 + i*32];
// Tree reduce
for (int k = 0; k < 4; k++)
for (int j = 2; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
u[k][i] += u[k][i+j];
k = idx_K*32 + tid31;
c = idx_C*32 + tid32;
bk = k < K;
uint offsetU = c*K + k;
atomicRed(add_ptr_u(U, offsetU + 0*K), u[0][0], 0, bk && c + 0 < C);
atomicRed(add_ptr_u(U, offsetU + 4*K), u[1][0], 0, bk && c + 4 < C);
atomicRed(add_ptr_u(U, offsetU + 8*K), u[2][0], 0, bk && c + 8 < C);
atomicRed(add_ptr_u(U, offsetU + 12*K), u[3][0], 0, bk && c + 12 < C);
__syncthreads();
storU4[0*8*4] = *(float4*)regU[4];
storU4[1*8*4] = *(float4*)regU[5];
storU4[2*8*4] = *(float4*)regU[6];
storU4[3*8*4] = *(float4*)regU[7];
__syncthreads();
for (int j = 0; j < 4; j++)
for (int i = 0; i < 4; i++)
u[j][i] = readU[j*32*4*4 + j*16 + i*32];
// Tree reduce
for (int k = 0; k < 4; k++)
for (int j = 2; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
u[k][i] += u[k][i+j];
atomicRed(add_ptr_u(U, offsetU + 16*K), u[0][0], 0, bk && c + 16 < C);
atomicRed(add_ptr_u(U, offsetU + 20*K), u[1][0], 0, bk && c + 20 < C);
atomicRed(add_ptr_u(U, offsetU + 24*K), u[2][0], 0, bk && c + 24 < C);
atomicRed(add_ptr_u(U, offsetU + 28*K), u[3][0], 0, bk && c + 28 < C);
}
# if __CUDA_ARCH__ >= 700
#include <mma.h>
using namespace nvcuda::wmma;
extern "C"
__device__ __noinline__ void output_gemm_64x64x32_TN(float* fShare, float* U, uint C, uint K, uint offsetU, uint readU)
{
for (int i = 0; i < 8; i++)
atomicRed(U + (offsetU + i*K), fShare[readU + i*272] + fShare[readU + i*272 + 128]);
}
extern "C"
__global__ void __launch_bounds__(256) hmma_gemm_64x64x32_TN_vec8(float* U, const ehalf8* __restrict__ X, const ehalf8* __restrict__ E, uint C, uint K, uint N, uint inc_n, uint inc_c, uint inc_k)
{
__shared__ float fShare[(256+16)*16];
half* hShare = (half*)&fShare[0];
uint tid = threadIdx.x;
uint idx_C = blockIdx.y;
uint idx_K = blockIdx.x;
uint idx_N = blockIdx.z;
uint tid31 = tid & 31;
uint tx = tid & 7;
uint ty = tid >> 3;
uint n = idx_N*32 + ty;
// global offsets in vector units
uint c = idx_C*8 + tx;
uint k = idx_K*8 + tx;
uint offsetC = n*C + c;
uint offsetK = n*K + k;
// bool bc = c < C;
// bool bk = k < K;
asm volatile (".reg .pred bc; setp.lt.u32 bc, %0, %1;" :: "r"(c), "r"(C));
asm volatile (".reg .pred bk; setp.lt.u32 bk, %0, %1;" :: "r"(k), "r"(K));
// When reading, each group of 4 warp works on its own 16 rows.
// These 2 groups of 16 are added together at end.
// Also add 16 elements per row to reduce bank conflicts.
uint writeS = ty*80*2 + tx*8*2; // byte units
uint row16 = (tid & 128) * (16*80/128);
uint readCs = row16 + (tid & 64) * 32/64 + (tid31 & 3)*80 + (tid31 & 4)*2 + (tid31 & 16)/4;
uint readKs = row16 + (tid & 32) + 32*80 + (tid31 & 3)*80 + (tid31 & 8) + (tid31 & 16)/4;
fragment<accumulator,16,16,16,float> fragU[2][2];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fill_fragment(fragU[i][j], 0.0f);
// assume a minimum of one loop
#pragma unroll 1
do
{
asm volatile ("{\n\t"
".reg .u32 c<4>, k<4>;\n\t"
"mov.u32 c0, 0;\n\t"
"mov.u32 c1, 0;\n\t"
"mov.u32 c2, 0;\n\t"
"mov.u32 c3, 0;\n\t"
"mov.u32 k0, 0;\n\t"
"mov.u32 k1, 0;\n\t"
"mov.u32 k2, 0;\n\t"
"mov.u32 k3, 0;\n\t"
"@bc ld.global.nc.v4.u32 {c0, c1, c2, c3}, [%0];\n\t"
"@bk ld.global.nc.v4.u32 {k0, k1, k2, k3}, [%1];\n\t"
"bar.sync 0;\n\t"
"st.shared.v4.u32 [%2 + 0*80*2], {c0, c1, c2, c3};\n\t"
"st.shared.v4.u32 [%2 + 32*80*2], {k0, k1, k2, k3};\n\t"
"bar.sync 0;\n\t"
"}" :: "l"(X + offsetC), "l"(E + offsetK), "r"(writeS));
offsetC += inc_c;
offsetK += inc_k;
n += inc_n;
fragment<matrix_a,16,16,16,half,col_major> fragC[2];
fragment<matrix_b,16,16,16,half,row_major> fragK[2];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 4; j++)
{
*(uint2*)&fragC[i].x[j*4] = *(uint2*)&hShare[readCs + i*16 + j*4*80];
*(uint2*)&fragK[i].x[j*4] = *(uint2*)&hShare[readKs + i*16 + j*4*80];
}
mma_sync(fragU[0][0], fragC[0], fragK[0], fragU[0][0], false);
mma_sync(fragU[1][0], fragC[1], fragK[0], fragU[1][0], false);
mma_sync(fragU[1][1], fragC[1], fragK[1], fragU[1][1], false);
mma_sync(fragU[0][1], fragC[0], fragK[1], fragU[0][1], false);
} while (n < N);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_K) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_C) :);
uint storU = (tid & 224) + ((tid & 1) + (tid & 4)*2 + (tid & 16)/4)*272 + (tid & 2) + (tid & 8);
uint readU = (tid & 127) + (tid & 128) * (272*8/128);
// leaving vector math
C *= 8;
K *= 8;
k = idx_K*64 + (tid & 63);
c = idx_C*64 + (tid & 64)*32/64 + (tid & 128)*8/128;
bool bk = k < K;
uint offsetU = c*K + k;
#pragma unroll
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
for (int m = 0; m < 2; m++)
*(float2*)&fShare[storU + j*16 + k*4 + m*2*272] = *(float2*)&fragU[i][j].x[k*4 + m*2];
//store_matrix_sync(&fShare[storU + j*16], fragU[i][j], 272, mem_row_major);
__syncthreads();
if (c + i*16 < C && bk)
output_gemm_64x64x32_TN(fShare, U, C, K, offsetU + i*16*K, readU);
}
}
#else // __CUDA_ARCH__ >= 700
__global__ void __launch_bounds__(256) hmma_gemm_64x64x32_TN_vec8(float* U, const ehalf8* __restrict__ X, const ehalf8* __restrict__ E, uint C, uint K, uint N, uint inc_n, uint inc_c, uint inc_k)
{
*U = 0;
}
#endif // __CUDA_ARCH__ >= 700
template <typename V>
bool Gemm_TN(hipStream_t stream, uint SMs, int major,
float* u,
const V* x,
const V* e,
uint C, uint K, uint N)
{
hipMemsetAsync((hipDeviceptr_t)u, 0, C*K, stream);
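// u is zeroed up front because each grid.z segment of the kernels below
// accumulates its partial X^T * E product (its slice of the N dimension) into u via atomicRed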
if (std::is_same<V, ehalf4>::value && major >= 7 && (C & 7) == 0 && (K & 7) == 0)
{
const ehalf8* X = (const ehalf8*)x;
const ehalf8* E = (const ehalf8*)e;
uint gridK = CEIL_DIV(K, 64);
uint gridC = CEIL_DIV(C, 64);
uint gridN = CEIL_DIV(N, 32);
C >>= 3;
K >>= 3;
// target 4 blocks per SM
uint segments = SMs, tiles = gridK*gridC;
if (tiles >= 64) segments /= 8;
else if (tiles >= 16) segments /= 4;
else if (tiles > 4) segments /= 2;
else if (tiles == 2) segments *= 2;
else if (tiles == 1) segments *= 4;
if (segments > gridN)
segments = gridN;
uint seg_len = segments*32;
dim3 grid(gridK, gridC, segments);
hipLaunchKernelGGL(( hmma_gemm_64x64x32_TN_vec8), dim3(grid),dim3(256),0,stream, u, X, E, C, K, N, seg_len, seg_len*C, seg_len*K);
return true; // TODO
}
uint gridK = CEIL_DIV(K, 32);
uint gridC = CEIL_DIV(C, 32);
uint gridN = CEIL_DIV(N, 32);
C >>= 2;
K >>= 2;
// target mult of 6 blocks per SM
uint smMult = 1, tiles = gridK*gridC;
if (tiles == 1) smMult = 6;
else if (tiles <= 4) smMult = 3;
uint segments = SMs*smMult;
if (segments > gridN)
segments = gridN;
uint seg_len = segments*32;
dim3 grid(gridK, gridC, segments);
hipLaunchKernelGGL(( gemm_32x32x32_TN_vec4<V>), dim3(grid),dim3(128),0,stream, u, x, e, C, K, N, C*16, K*16, seg_len, seg_len*C, seg_len*K);
return true; // TODO
}
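// Illustrative usage (not part of the original source): the tensor-core 64x64 path
// is taken when V is ehalf4 on Volta or newer (major >= 7) with C and K both
// multiples of 8; everything else falls back to the generic 32x32x32 kernel, e.g.
// Gemm_TN<ehalf4>(stream, /*SMs=*/80, /*major=*/7, u, x, e, C, K, N);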
template bool Gemm_TN<float4>(hipStream_t stream, uint SMs, int major, float* u, const float4* x, const float4* e, uint C, uint K, uint N);
template bool Gemm_TN<ehalf4>(hipStream_t stream, uint SMs, int major, float* u, const ehalf4* x, const ehalf4* e, uint C, uint K, uint N);
#endif // GOOGLE_CUDA
| f68840bee16c9ea57b50c10b6a3fce52fa672598.cu |
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
#include <type_traits>
template <typename V>
__global__ void __launch_bounds__(128) gemm_32x32x32_TN_vec4(float* U, const V* __restrict__ X, const V* __restrict__ E, uint C, uint K, uint N, uint C16, uint K16, uint inc_n, uint inc_c, uint inc_k)
{
__shared__ float shrU[32*32*2 + 16*4];
uint tid = threadIdx.x;
uint idx_C = blockIdx.y;
uint idx_K = blockIdx.x;
uint idx_N = blockIdx.z;
uint tx = tid & 7;
uint ty = tid >> 3;
uint n = idx_N*32 + ty;
// global offsets in vector units
uint c = idx_C*8 + tx;
uint k = idx_K*8 + tx;
uint offsetC = n*C + c;
uint offsetK = n*K + k;
bool bc = c < C;
bool bk = k < K;
// shared offsets in bytes
// When reading, each warp works on its own 8 rows.
// These groups of 8 are added together at the end.
uint writeS = (ty*32 + tx*4) * 4;
uint row8 = (tid & 96) * 32;
uint readCs = row8 + (((tid & 16) >> 3) | (tid & 1)) * 16;
uint readKs = row8 + ((tid >> 1) & 7) * 16;
// This keeps all prior logic outside of the loops.
asm("mov.b32 %0, %0;" : "+r"(writeS) : );
asm("mov.b32 %0, %0;" : "+r"(offsetC) : );
asm("mov.b32 %0, %0;" : "+r"(offsetK) : );
asm("mov.b32 %0, %0;" : "+r"(readCs) : );
asm("mov.b32 %0, %0;" : "+r"(readKs) : );
// zero 32 accumulation registers
float regU[8][4]; // [c][k]
for (int c = 0; c < 8; c++)
for (int k = 0; k < 4; k++)
regU[c][k] = 0;
// assume a minimum of one loop
#pragma unroll 1
do
{
V c00, c16;
V k00, k16;
ew_zero(c00); ew_zero(c16);
ew_zero(k00); ew_zero(k16);
const V* X00 = add_ptr_u(X, offsetC + 0);
const V* X16 = add_ptr_u(X, offsetC + C16);
const V* E00 = add_ptr_u(E, offsetK + 0);
const V* E16 = add_ptr_u(E, offsetK + K16);
if (bc)
{
c00 = ldg(X00);
c16 = ldg(X16);
}
if (bk)
{
k00 = ldg(E00);
k16 = ldg(E16);
}
offsetC += inc_c;
offsetK += inc_k;
n += inc_n;
__syncthreads();
st_shared_v4(writeS + ( 0*32 + 0*16*32)*4, to_float(c00));
st_shared_v4(writeS + ( 0*32 + 1*16*32)*4, to_float(c16));
st_shared_v4(writeS + (32*32 + 0*16*32)*4, to_float(k00));
st_shared_v4(writeS + (32*32 + 1*16*32)*4, to_float(k16));
__syncthreads();
float regC[8], regK[4];
#pragma unroll
for (int j = 0; j < 8; j++)
{
// fetch outer product data
ld_shared_v4(readCs + ( 0*32 + 32*j + 0)*4, ®C[0] );
ld_shared_v4(readCs + ( 0*32 + 32*j + 16)*4, ®C[4] );
ld_shared_v4(readKs + (32*32 + 32*j + 0)*4, regK );
// compute outer product
for (int c = 0; c < 8; c++)
for (int k = 0; k < 4; k++)
regU[c][k] += regC[c] * regK[k];
}
} while (n < N);
// conserve registers by forcing a reload of these
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_K) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_C) :);
// Arrange 4 tiles horizontally in the X direction: ((tid & 96) >> 2)
// Add some spacing to avoid write bank conflicts: (tidY << 2)
int tidY = ((tid & 16) >> 3) | (tid & 1);
int tidX = ((tid >> 1) & 7) + ((tid & 96) >> 2) + (tidY << 2);
float4* storU4 = (float4*)&shrU[tidY*32*4*4 + tidX*4];
__syncthreads();
storU4[0*8*4] = *(float4*)regU[0];
storU4[1*8*4] = *(float4*)regU[1];
storU4[2*8*4] = *(float4*)regU[2];
storU4[3*8*4] = *(float4*)regU[3];
__syncthreads();
// leaving vector math
uint tid31 = tid & 31;
uint tid32 = tid >> 5;
C *= 4;
K *= 4;
float* readU = &shrU[tid32*32*4 + tid31];
float u[4][4];
for (int j = 0; j < 4; j++)
for (int i = 0; i < 4; i++)
u[j][i] = readU[j*32*4*4 + j*16 + i*32];
// Tree reduce
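// (each 32-thread group accumulated its own 8 of the 32 rows of this N-slice and
// its partials sit 32 floats apart in shrU, so fold u[k][0..3] pairwise until
// u[k][0] holds the full sum)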
for (int k = 0; k < 4; k++)
for (int j = 2; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
u[k][i] += u[k][i+j];
k = idx_K*32 + tid31;
c = idx_C*32 + tid32;
bk = k < K;
uint offsetU = c*K + k;
atomicRed(add_ptr_u(U, offsetU + 0*K), u[0][0], 0, bk && c + 0 < C);
atomicRed(add_ptr_u(U, offsetU + 4*K), u[1][0], 0, bk && c + 4 < C);
atomicRed(add_ptr_u(U, offsetU + 8*K), u[2][0], 0, bk && c + 8 < C);
atomicRed(add_ptr_u(U, offsetU + 12*K), u[3][0], 0, bk && c + 12 < C);
__syncthreads();
storU4[0*8*4] = *(float4*)regU[4];
storU4[1*8*4] = *(float4*)regU[5];
storU4[2*8*4] = *(float4*)regU[6];
storU4[3*8*4] = *(float4*)regU[7];
__syncthreads();
for (int j = 0; j < 4; j++)
for (int i = 0; i < 4; i++)
u[j][i] = readU[j*32*4*4 + j*16 + i*32];
// Tree reduce
for (int k = 0; k < 4; k++)
for (int j = 2; j > 0; j >>= 1)
for (int i = 0; i < j; i++)
u[k][i] += u[k][i+j];
atomicRed(add_ptr_u(U, offsetU + 16*K), u[0][0], 0, bk && c + 16 < C);
atomicRed(add_ptr_u(U, offsetU + 20*K), u[1][0], 0, bk && c + 20 < C);
atomicRed(add_ptr_u(U, offsetU + 24*K), u[2][0], 0, bk && c + 24 < C);
atomicRed(add_ptr_u(U, offsetU + 28*K), u[3][0], 0, bk && c + 28 < C);
}
# if __CUDA_ARCH__ >= 700
#include <mma.h>
using namespace nvcuda::wmma;
extern "C"
__device__ __noinline__ void output_gemm_64x64x32_TN(float* fShare, float* U, uint C, uint K, uint offsetU, uint readU)
{
for (int i = 0; i < 8; i++)
atomicRed(U + (offsetU + i*K), fShare[readU + i*272] + fShare[readU + i*272 + 128]);
}
extern "C"
__global__ void __launch_bounds__(256) hmma_gemm_64x64x32_TN_vec8(float* U, const ehalf8* __restrict__ X, const ehalf8* __restrict__ E, uint C, uint K, uint N, uint inc_n, uint inc_c, uint inc_k)
{
__shared__ float fShare[(256+16)*16];
half* hShare = (half*)&fShare[0];
uint tid = threadIdx.x;
uint idx_C = blockIdx.y;
uint idx_K = blockIdx.x;
uint idx_N = blockIdx.z;
uint tid31 = tid & 31;
uint tx = tid & 7;
uint ty = tid >> 3;
uint n = idx_N*32 + ty;
// global offsets in vector units
uint c = idx_C*8 + tx;
uint k = idx_K*8 + tx;
uint offsetC = n*C + c;
uint offsetK = n*K + k;
// bool bc = c < C;
// bool bk = k < K;
asm volatile (".reg .pred bc; setp.lt.u32 bc, %0, %1;" :: "r"(c), "r"(C));
asm volatile (".reg .pred bk; setp.lt.u32 bk, %0, %1;" :: "r"(k), "r"(K));
// When reading, each group of 4 warps works on its own 16 rows.
// These 2 groups of 16 are added together at the end.
// Also add 16 elements per row to reduce bank conflicts.
uint writeS = ty*80*2 + tx*8*2; // byte units
uint row16 = (tid & 128) * (16*80/128);
uint readCs = row16 + (tid & 64) * 32/64 + (tid31 & 3)*80 + (tid31 & 4)*2 + (tid31 & 16)/4;
uint readKs = row16 + (tid & 32) + 32*80 + (tid31 & 3)*80 + (tid31 & 8) + (tid31 & 16)/4;
fragment<accumulator,16,16,16,float> fragU[2][2];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 2; j++)
fill_fragment(fragU[i][j], 0.0f);
// assume a minimum of one loop
#pragma unroll 1
do
{
asm volatile ("{\n\t"
".reg .u32 c<4>, k<4>;\n\t"
"mov.u32 c0, 0;\n\t"
"mov.u32 c1, 0;\n\t"
"mov.u32 c2, 0;\n\t"
"mov.u32 c3, 0;\n\t"
"mov.u32 k0, 0;\n\t"
"mov.u32 k1, 0;\n\t"
"mov.u32 k2, 0;\n\t"
"mov.u32 k3, 0;\n\t"
"@bc ld.global.nc.v4.u32 {c0, c1, c2, c3}, [%0];\n\t"
"@bk ld.global.nc.v4.u32 {k0, k1, k2, k3}, [%1];\n\t"
"bar.sync 0;\n\t"
"st.shared.v4.u32 [%2 + 0*80*2], {c0, c1, c2, c3};\n\t"
"st.shared.v4.u32 [%2 + 32*80*2], {k0, k1, k2, k3};\n\t"
"bar.sync 0;\n\t"
"}" :: "l"(X + offsetC), "l"(E + offsetK), "r"(writeS));
offsetC += inc_c;
offsetK += inc_k;
n += inc_n;
fragment<matrix_a,16,16,16,half,col_major> fragC[2];
fragment<matrix_b,16,16,16,half,row_major> fragK[2];
for (int i = 0; i < 2; i++)
for (int j = 0; j < 4; j++)
{
*(uint2*)&fragC[i].x[j*4] = *(uint2*)&hShare[readCs + i*16 + j*4*80];
*(uint2*)&fragK[i].x[j*4] = *(uint2*)&hShare[readKs + i*16 + j*4*80];
}
mma_sync(fragU[0][0], fragC[0], fragK[0], fragU[0][0], false);
mma_sync(fragU[1][0], fragC[1], fragK[0], fragU[1][0], false);
mma_sync(fragU[1][1], fragC[1], fragK[1], fragU[1][1], false);
mma_sync(fragU[0][1], fragC[0], fragK[1], fragU[0][1], false);
} while (n < N);
asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :);
asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_K) :);
asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_C) :);
uint storU = (tid & 224) + ((tid & 1) + (tid & 4)*2 + (tid & 16)/4)*272 + (tid & 2) + (tid & 8);
uint readU = (tid & 127) + (tid & 128) * (272*8/128);
// leaving vector math
C *= 8;
K *= 8;
k = idx_K*64 + (tid & 63);
c = idx_C*64 + (tid & 64)*32/64 + (tid & 128)*8/128;
bool bk = k < K;
uint offsetU = c*K + k;
#pragma unroll
for (int i = 0; i < 2; i++)
{
__syncthreads();
for (int j = 0; j < 2; j++)
for (int k = 0; k < 2; k++)
for (int m = 0; m < 2; m++)
*(float2*)&fShare[storU + j*16 + k*4 + m*2*272] = *(float2*)&fragU[i][j].x[k*4 + m*2];
//store_matrix_sync(&fShare[storU + j*16], fragU[i][j], 272, mem_row_major);
__syncthreads();
if (c + i*16 < C && bk)
output_gemm_64x64x32_TN(fShare, U, C, K, offsetU + i*16*K, readU);
}
}
#else // __CUDA_ARCH__ >= 700
__global__ void __launch_bounds__(256) hmma_gemm_64x64x32_TN_vec8(float* U, const ehalf8* __restrict__ X, const ehalf8* __restrict__ E, uint C, uint K, uint N, uint inc_n, uint inc_c, uint inc_k)
{
*U = 0;
}
#endif // __CUDA_ARCH__ >= 700
template <typename V>
bool Gemm_TN(CUstream stream, uint SMs, int major,
float* u,
const V* x,
const V* e,
uint C, uint K, uint N)
{
cuMemsetD32Async((CUdeviceptr)u, 0, C*K, stream);
if (std::is_same<V, ehalf4>::value && major >= 7 && (C & 7) == 0 && (K & 7) == 0)
{
const ehalf8* X = (const ehalf8*)x;
const ehalf8* E = (const ehalf8*)e;
uint gridK = CEIL_DIV(K, 64);
uint gridC = CEIL_DIV(C, 64);
uint gridN = CEIL_DIV(N, 32);
C >>= 3;
K >>= 3;
// target 4 blocks per SM
uint segments = SMs, tiles = gridK*gridC;
if (tiles >= 64) segments /= 8;
else if (tiles >= 16) segments /= 4;
else if (tiles > 4) segments /= 2;
else if (tiles == 2) segments *= 2;
else if (tiles == 1) segments *= 4;
if (segments > gridN)
segments = gridN;
uint seg_len = segments*32;
dim3 grid(gridK, gridC, segments);
hmma_gemm_64x64x32_TN_vec8<<<grid,256,0,stream>>>(u, X, E, C, K, N, seg_len, seg_len*C, seg_len*K);
return true; // TODO
}
uint gridK = CEIL_DIV(K, 32);
uint gridC = CEIL_DIV(C, 32);
uint gridN = CEIL_DIV(N, 32);
C >>= 2;
K >>= 2;
// target mult of 6 blocks per SM
uint smMult = 1, tiles = gridK*gridC;
if (tiles == 1) smMult = 6;
else if (tiles <= 4) smMult = 3;
uint segments = SMs*smMult;
if (segments > gridN)
segments = gridN;
uint seg_len = segments*32;
dim3 grid(gridK, gridC, segments);
gemm_32x32x32_TN_vec4<V><<<grid,128,0,stream>>>(u, x, e, C, K, N, C*16, K*16, seg_len, seg_len*C, seg_len*K);
return true; // TODO
}
template bool Gemm_TN<float4>(CUstream stream, uint SMs, int major, float* u, const float4* x, const float4* e, uint C, uint K, uint N);
template bool Gemm_TN<ehalf4>(CUstream stream, uint SMs, int major, float* u, const ehalf4* x, const ehalf4* e, uint C, uint K, uint N);
#endif // GOOGLE_CUDA
|
f25db2e3b86e6711a7c98902f879e953ea1c5707.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=[64,64] --gridDim=64 --no-inline
#include <hip/hip_runtime.h>
__axiom(blockIdx.x == 16);
__axiom(blockIdx.y == 16);
__global__ void foo(int* A) {
// Only race free because of axioms
if(blockIdx.x != 16 || blockIdx.y != 16) {
A[0] = threadIdx.x;
}
}
| f25db2e3b86e6711a7c98902f879e953ea1c5707.cu | //pass
//--blockDim=[64,64] --gridDim=64 --no-inline
#include <cuda.h>
__axiom(blockIdx.x == 16);
__axiom(blockIdx.y == 16);
__global__ void foo(int* A) {
// Only race free because of axioms
if(blockIdx.x != 16 || blockIdx.y != 16) {
A[0] = threadIdx.x;
}
}
|
3c21cf848c758dcec4c101bbb506c83aad6440bd.hip | // !!! This is a file automatically generated by hipify!!!
/*
CG Solver.
[2016.05.03 Hideo Matsufuru]
*/
#include "lattice.h"
extern real_t *p, *x;
static void solve_CG_init(real_t *__restrict__ rrp, real_t *__restrict__ rr, real_t *u,
real_t *x, real_t *r,
real_t *s, real_t *p)
{
#ifdef _PROF
double tmp = dtime();
#endif
copy(r, s);
copy(x, s);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[COPY] += dtime() - tmp;
#endif
opr_DdagD_alt(s, u, x, 0);
#ifdef _PROF
tmp = dtime();
#endif
axpy(r, -1.0, s);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[AXPY] += dtime() - tmp;
tmp = dtime();
#endif
copy(p, r);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[COPY] += dtime() - tmp;
tmp = dtime();
#endif
*rrp = *rr = norm2(r);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[NORM] += dtime() - tmp;
#endif
}
static void solve_CG_step(real_t *__restrict__ rrp2, real_t *__restrict__ rr2, real_t *u,
real_t *x, real_t *r,
real_t *p, real_t *v)
{
real_t rrp = *rrp2;
opr_DdagD_alt(v, u, p, 1);
#ifdef _PROF
double tmp = dtime();
#endif
real_t pap = dot(v, p);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[DOT] += dtime() - tmp;
#endif
real_t cr = rrp/pap;
#ifdef _PROF
tmp = dtime();
#endif
axpy(x, cr, p);
axpy(r, -cr, v);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[AXPY] += dtime() - tmp;
tmp = dtime();
#endif
real_t rr = norm2(r);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[NORM] += dtime() - tmp;
#endif
real_t bk = rr/rrp;
#ifdef _PROF
tmp = dtime();
#endif
scal(p, bk);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[SCAL] += dtime() - tmp;
tmp = dtime();
#endif
axpy(p, 1.0, r);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[AXPY] += dtime() - tmp;
#endif
*rr2 = *rrp2 = rr;
}
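// solve_CG_step above is one iteration of standard CG on the normal operator DdagD:
// alpha = (r,r)/(p,Ap); x += alpha*p; r -= alpha*Ap;
// beta = (r',r')/(r,r); p = beta*p + r (done as scal followed by axpy).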
void solve_CG(const real_t enorm, int *__restrict__ nconv, real_t *__restrict__ diff, real_t *xq,
real_t *u, real_t *b)
{
int niter = 1000;
// static real_t *x, *s, *r, *p;
static real_t *s, *r;
static int STATIC_FLAG = 1;
// if(STATIC_FLAG) HANDLE_ERROR( hipMalloc((void**)&x, NVST2*sizeof(real_t)) );
if(STATIC_FLAG) HANDLE_ERROR( hipMalloc((void**)&s, NVST2*sizeof(real_t)) );
if(STATIC_FLAG) HANDLE_ERROR( hipMalloc((void**)&r, NVST2*sizeof(real_t)) );
// if(STATIC_FLAG) HANDLE_ERROR( hipMalloc((void**)&p, NVST2*sizeof(real_t)) );
#ifdef _PROF
double tmp = dtime();
#endif
copy(s, b);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[COPY] += dtime() - tmp;
tmp = dtime();
#endif
real_t sr = norm2(s);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[NORM] += dtime() - tmp;
#endif
real_t snorm = 1.0/sr;
real_t rr, rrp;
*nconv = -1;
solve_CG_init(&rrp, &rr, u, x, r, s, p);
for(int iter = 0; iter < niter; iter++){
solve_CG_step(&rrp, &rr, u, x, r, p, s);
if(rr*snorm < enorm){
*nconv = iter;
break;
}
}
if(*nconv == -1){
printf(" not converged\n");
MPI_Finalize();
exit(1);
}
#ifdef _PROF
tmp = dtime();
#endif
copy(xq, x);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[COPY] += dtime() - tmp;
#endif
opr_DdagD_alt(r, u, x, 0);
#ifdef _PROF
tmp = dtime();
#endif
axpy(r, -1.0, b);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[AXPY] += dtime() - tmp;
tmp = dtime();
#endif
*diff = norm2(r);
#ifdef _PROF
hipDeviceSynchronize();
prof_t[NORM] += dtime() - tmp;
#endif
STATIC_FLAG = 0;
}
| 3c21cf848c758dcec4c101bbb506c83aad6440bd.cu | /*
CG Solver.
[2016.05.03 Hideo Matsufuru]
*/
#include "lattice.h"
extern real_t *p, *x;
static void solve_CG_init(real_t *__restrict__ rrp, real_t *__restrict__ rr, real_t *u,
real_t *x, real_t *r,
real_t *s, real_t *p)
{
#ifdef _PROF
double tmp = dtime();
#endif
copy(r, s);
copy(x, s);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[COPY] += dtime() - tmp;
#endif
opr_DdagD_alt(s, u, x, 0);
#ifdef _PROF
tmp = dtime();
#endif
axpy(r, -1.0, s);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[AXPY] += dtime() - tmp;
tmp = dtime();
#endif
copy(p, r);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[COPY] += dtime() - tmp;
tmp = dtime();
#endif
*rrp = *rr = norm2(r);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[NORM] += dtime() - tmp;
#endif
}
static void solve_CG_step(real_t *__restrict__ rrp2, real_t *__restrict__ rr2, real_t *u,
real_t *x, real_t *r,
real_t *p, real_t *v)
{
real_t rrp = *rrp2;
opr_DdagD_alt(v, u, p, 1);
#ifdef _PROF
double tmp = dtime();
#endif
real_t pap = dot(v, p);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[DOT] += dtime() - tmp;
#endif
real_t cr = rrp/pap;
#ifdef _PROF
tmp = dtime();
#endif
axpy(x, cr, p);
axpy(r, -cr, v);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[AXPY] += dtime() - tmp;
tmp = dtime();
#endif
real_t rr = norm2(r);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[NORM] += dtime() - tmp;
#endif
real_t bk = rr/rrp;
#ifdef _PROF
tmp = dtime();
#endif
scal(p, bk);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[SCAL] += dtime() - tmp;
tmp = dtime();
#endif
axpy(p, 1.0, r);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[AXPY] += dtime() - tmp;
#endif
*rr2 = *rrp2 = rr;
}
void solve_CG(const real_t enorm, int *__restrict__ nconv, real_t *__restrict__ diff, real_t *xq,
real_t *u, real_t *b)
{
int niter = 1000;
// static real_t *x, *s, *r, *p;
static real_t *s, *r;
static int STATIC_FLAG = 1;
// if(STATIC_FLAG) HANDLE_ERROR( cudaMalloc((void**)&x, NVST2*sizeof(real_t)) );
if(STATIC_FLAG) HANDLE_ERROR( cudaMalloc((void**)&s, NVST2*sizeof(real_t)) );
if(STATIC_FLAG) HANDLE_ERROR( cudaMalloc((void**)&r, NVST2*sizeof(real_t)) );
// if(STATIC_FLAG) HANDLE_ERROR( cudaMalloc((void**)&p, NVST2*sizeof(real_t)) );
#ifdef _PROF
double tmp = dtime();
#endif
copy(s, b);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[COPY] += dtime() - tmp;
tmp = dtime();
#endif
real_t sr = norm2(s);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[NORM] += dtime() - tmp;
#endif
real_t snorm = 1.0/sr;
real_t rr, rrp;
*nconv = -1;
solve_CG_init(&rrp, &rr, u, x, r, s, p);
for(int iter = 0; iter < niter; iter++){
solve_CG_step(&rrp, &rr, u, x, r, p, s);
if(rr*snorm < enorm){
*nconv = iter;
break;
}
}
if(*nconv == -1){
printf(" not converged\n");
MPI_Finalize();
exit(1);
}
#ifdef _PROF
tmp = dtime();
#endif
copy(xq, x);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[COPY] += dtime() - tmp;
#endif
opr_DdagD_alt(r, u, x, 0);
#ifdef _PROF
tmp = dtime();
#endif
axpy(r, -1.0, b);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[AXPY] += dtime() - tmp;
tmp = dtime();
#endif
*diff = norm2(r);
#ifdef _PROF
cudaDeviceSynchronize();
prof_t[NORM] += dtime() - tmp;
#endif
STATIC_FLAG = 0;
}
|
30463fc42c0227819b6df2019982c6747bcd7c3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/math.hpp>
namespace nbla {
// Sets head pointers of matrices in mini-batch.
template <typename T>
__global__ void kernel_set_batch_pointers(int batchSize, int n, const T **ptr,
const T *head) {
NBLA_CUDA_KERNEL_LOOP(idx, batchSize) { ptr[idx] = head + idx * n * n; }
}
// A macro that creates an array of pointers of matrices.
#define NBLA_GET_BATCH_POINTERS(PTR, NAME, BATCH, CONST) \
CudaCachedArray list_##PTR(sizeof(Tcu *) * BATCH, dtypes::BYTE, ctx); \
CONST Tcu **dev_list_##NAME = \
reinterpret_cast<CONST Tcu **>(list_##PTR.pointer<void>()); \
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_set_batch_pointers, BATCH, dim, \
(const T **)dev_list_##NAME, (const T *)PTR)
template <typename T, bool with_abs_log>
__global__ void kernel_compute_det(int batchSize, int n, T *y, const T *lu,
int *pivot) {
NBLA_CUDA_KERNEL_LOOP(idx, batchSize) {
y[idx] = 1.0;
int offset = idx * n * n;
int parity = 0;
// the determinant is the product of the diagonal entries of the LU factorization
for (int i = 0; i < n; ++i) {
y[idx] *= lu[offset + i * n + i];
if (pivot[idx * n + i] != (i + 1))
++parity;
}
// flip the sign by the parity of row swaps (pivot entries that differ from their 1-based index)
T sign = 1.0 - 2.0 * (T)(parity % 2);
y[idx] *= sign;
// post operation
if (with_abs_log) {
y[idx] = log(abs(y[idx]));
}
}
}
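// The kernel above uses the batched LU factorization P*A = L*U (unit-diagonal L):
// det(A) = (-1)^(#row swaps) * prod_i U[i][i]. getrfBatched reports 1-based pivots,
// so an entry different from i+1 counts as a swap; with_abs_log switches the
// result to log|det(A)|.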
// ----------------------------------------------------------------------
// With cublas<t>getrfBatched
// ----------------------------------------------------------------------
template <typename T, typename Tcu, bool with_abs_log>
void batch_det_forward(const Context &ctx, int device, const Variables &inputs,
const Variables &outputs, int dim, int batch_size) {
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(ctx);
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(ctx, true);
shared_ptr<CudaCachedArray> pivot =
make_shared<CudaCachedArray>(dim * batch_size, dtypes::INT, ctx);
pivot->zero();
shared_ptr<CudaCachedArray> info =
make_shared<CudaCachedArray>(batch_size, dtypes::INT, ctx);
info->zero();
shared_ptr<CudaCachedArray> lu =
make_shared<CudaCachedArray>(inputs[0]->size(), get_dtype<Tcu>(), ctx);
lu->copy_from(inputs[0]->data()->cast(get_dtype<Tcu>(), ctx, false));
Tcu *lu_ptr = lu->pointer<Tcu>();
NBLA_GET_BATCH_POINTERS(lu_ptr, lu, batch_size, ); // dev_list_lu
// LU factorization
cuda_getrf_batched<Tcu>(device, dim, dev_list_lu, pivot->pointer<int>(),
info->pointer<int>(), batch_size);
auto kernel = kernel_compute_det<Tcu, with_abs_log>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, batch_size, dim, y, lu_ptr,
pivot->pointer<int>());
}
} // namespace nbla
| 30463fc42c0227819b6df2019982c6747bcd7c3c.cu | // Copyright 2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/math.hpp>
namespace nbla {
// Sets head pointers of matrices in mini-batch.
template <typename T>
__global__ void kernel_set_batch_pointers(int batchSize, int n, const T **ptr,
const T *head) {
NBLA_CUDA_KERNEL_LOOP(idx, batchSize) { ptr[idx] = head + idx * n * n; }
}
// A macro that creates an array of pointers of matrices.
#define NBLA_GET_BATCH_POINTERS(PTR, NAME, BATCH, CONST) \
CudaCachedArray list_##PTR(sizeof(Tcu *) * BATCH, dtypes::BYTE, ctx); \
CONST Tcu **dev_list_##NAME = \
reinterpret_cast<CONST Tcu **>(list_##PTR.pointer<void>()); \
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_set_batch_pointers, BATCH, dim, \
(const T **)dev_list_##NAME, (const T *)PTR)
template <typename T, bool with_abs_log>
__global__ void kernel_compute_det(int batchSize, int n, T *y, const T *lu,
int *pivot) {
NBLA_CUDA_KERNEL_LOOP(idx, batchSize) {
y[idx] = 1.0;
int offset = idx * n * n;
int parity = 0;
// the determinant is the product of the diagonal entries of the LU factorization
for (int i = 0; i < n; ++i) {
y[idx] *= lu[offset + i * n + i];
if (pivot[idx * n + i] != (i + 1))
++parity;
}
// flip the sign by the parity of row swaps (pivot entries that differ from their 1-based index)
T sign = 1.0 - 2.0 * (T)(parity % 2);
y[idx] *= sign;
// post operation
if (with_abs_log) {
y[idx] = log(abs(y[idx]));
}
}
}
// ----------------------------------------------------------------------
// With cublas<t>getrfBatched
// ----------------------------------------------------------------------
template <typename T, typename Tcu, bool with_abs_log>
void batch_det_forward(const Context &ctx, int device, const Variables &inputs,
const Variables &outputs, int dim, int batch_size) {
const Tcu *x = inputs[0]->get_data_pointer<Tcu>(ctx);
Tcu *y = outputs[0]->cast_data_and_get_pointer<Tcu>(ctx, true);
shared_ptr<CudaCachedArray> pivot =
make_shared<CudaCachedArray>(dim * batch_size, dtypes::INT, ctx);
pivot->zero();
shared_ptr<CudaCachedArray> info =
make_shared<CudaCachedArray>(batch_size, dtypes::INT, ctx);
info->zero();
shared_ptr<CudaCachedArray> lu =
make_shared<CudaCachedArray>(inputs[0]->size(), get_dtype<Tcu>(), ctx);
lu->copy_from(inputs[0]->data()->cast(get_dtype<Tcu>(), ctx, false));
Tcu *lu_ptr = lu->pointer<Tcu>();
NBLA_GET_BATCH_POINTERS(lu_ptr, lu, batch_size, ); // dev_list_lu
// LU factorization
cuda_getrf_batched<Tcu>(device, dim, dev_list_lu, pivot->pointer<int>(),
info->pointer<int>(), batch_size);
auto kernel = kernel_compute_det<Tcu, with_abs_log>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, batch_size, dim, y, lu_ptr,
pivot->pointer<int>());
}
} // namespace nbla
|
baf796a2fc2b8246ae4af03010c1b906b18207a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/kernel_util.cuh"
#include "oneflow/user/kernels/model_update_kernel_util.h"
namespace oneflow {
namespace {
template<typename T, typename G>
__global__ void SGDUpdateGpu(int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const G* model_diff,
T* model) {
const T lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
SGDUpdateFunctor<T, G>()(model_diff + i, model + i, scale, l1, l2, weight_decay, lr);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesSGDUpdateGpu(const IDX data_elem_cnt, const K* indices,
const T* values, const float* learning_rate,
const IDX num_features, const IDX feature_size, T* model,
const IDX feature_id_offset) {
const T minus_lr = -*learning_rate;
CUDA_1D_KERNEL_LOOP_T(IDX, i, data_elem_cnt) {
const T val = values[i];
if (val != static_cast<T>(0)) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX feature_id = indices[indices_idx];
assert(feature_id >= 0);
const IDX local_feature_id = feature_id - feature_id_offset;
if (local_feature_id >= 0 && local_feature_id < num_features) {
const IDX update_offset = local_feature_id * feature_size + inner_idx;
gpu_atomic_add(model + update_offset, val * minus_lr);
}
}
}
}
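// The kernel above scatters -lr * values into the model rows named by indices;
// duplicate indices are handled by gpu_atomic_add, and feature_id_offset /
// num_features restrict the update to the rows owned by this model shard.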
} // namespace
template<typename T, typename G>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const G* model_diff,
T* model);
};
template<typename T, typename G>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(DeviceCtx* ctx, int64_t n, T scale,
float l1, float l2, float weight_decay,
const float* learning_rate,
const T* scale_by_ptr, const G* model_diff,
T* model) {
hipLaunchKernelGGL(( SGDUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, weight_decay, learning_rate, scale_by_ptr, model_diff, model);
}
template<typename T>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const float16* model_diff,
T* model);
};
template<typename T>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const float16* model_diff, T* model) {
SGDUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, weight_decay, learning_rate, scale_by_ptr,
reinterpret_cast<const half*>(model_diff), model);
}
template struct SGDUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K>
struct IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K> {
static void Update(DeviceCtx* ctx, int64_t num_indices, int64_t num_features,
int64_t feature_size, int64_t feature_id_offset, const float* learning_rate,
const K* indices, const T* values, T* model);
};
template<typename T, typename K>
void IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K>::Update(
DeviceCtx* ctx, int64_t num_indices, int64_t num_features, int64_t feature_size,
int64_t feature_id_offset, const float* learning_rate, const K* indices, const T* values,
T* model) {
const int64_t values_elem_cnt = num_indices * feature_size;
hipLaunchKernelGGL(( IndexedSlicesSGDUpdateGpu<T, K, int64_t>)
, dim3(BlocksNum4ThreadsNum(values_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
values_elem_cnt, indices, values, learning_rate, num_features, feature_size, model,
feature_id_offset);
}
#define INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU(in_type_pair, index_type_pair) \
template struct IndexedSlicesSGDUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ);
#undef INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU
namespace {
template<typename T, typename G>
__global__ void MomentumUpdateGpu(int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* model,
T* momentum) {
const T lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
MomentumUpdateFunctor<T, G>()(model_diff + i, model + i, momentum + i, scale, l1, l2, beta,
weight_decay, lr);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesMomentumUpdateGpu(T beta, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum) {
const int64_t n = *num_unique_instance * feature_size;
const T lr = *learning_rate;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
MomentumUpdateFunctor<T, T>()(values + i, model + model_idx, momentum + model_idx,
static_cast<T>(1), 0.0, 0.0, beta, 0.0, lr);
}
}
}
} // namespace
template<typename T, typename G>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const G* model_diff, T* model, T* momentum);
};
template<typename T, typename G>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const G* model_diff, T* model, T* momentum) {
hipLaunchKernelGGL(( MomentumUpdateGpu<T, G>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, beta, weight_decay, learning_rate, scale_by_ptr, model_diff, model,
momentum);
}
template<typename T>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const float16* model_diff, T* model, T* momentum);
};
template<typename T>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const float16* model_diff, T* model,
T* momentum) {
MomentumUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta, weight_decay, learning_rate, scale_by_ptr,
reinterpret_cast<const half*>(model_diff), model, momentum);
}
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, T beta, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model,
T* momentum);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, T beta, int64_t num_instance, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const K* indices, const T* values, T* model, T* momentum) {
hipLaunchKernelGGL(( IndexedSlicesMomentumUpdateGpu<T, K>)
, dim3(BlocksNum4ThreadsNum(num_instance * feature_size)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->cuda_stream(), beta, feature_size, lower_bound, upper_bound, num_unique_instance,
learning_rate, indices, values, model, momentum);
}
#define INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesMomentumMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU
namespace {
__global__ void AdamBiasCorrectionLearningRateGpu(float beta1, float beta2,
const float* learning_rate,
const int64_t* train_step, float* out) {
const auto exponent = static_cast<double>(*train_step + 1);
const float beta1_power = static_cast<float>(pow(beta1, exponent));
const float beta2_power = static_cast<float>(pow(beta2, exponent));
*out = *learning_rate * sqrt(1 - beta2_power) / (1 - beta1_power);
}
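// The factor computed above is lr_t = lr * sqrt(1 - beta2^(t+1)) / (1 - beta1^(t+1)),
// i.e. Adam's bias correction folded into the learning rate (train_step is 0-based).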
template<typename T, typename G>
__global__ void AdamUpdateGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* model, T* m, T* v) {
const float lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
AdamUpdateFunctor<T, G>()(model_diff + i, model + i, m + i, v + i, scale, l1, l2, beta1, beta2,
epsilon, weight_decay, lr);
}
}
template<typename T>
__global__ void AdamUpdateBetaTGpu(const T beta1, const T beta2, T* beta1_t, T* beta2_t) {
*beta1_t *= beta1;
*beta2_t *= beta2;
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesAdamUpdateGpu(float beta1, float beta2, float epsilon,
int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* m, T* v) {
const float lr = *learning_rate;
const int64_t n = *num_unique_instance * feature_size;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
AdamUpdateFunctor<T, T>()(values + i, model + model_idx, m + model_idx, v + model_idx,
static_cast<T>(1), 0, 0, beta1, beta2, epsilon, 0, lr);
}
}
}
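// The sparse Adam kernel above applies the gradient rows in `values` addressed by
// `indices`; each shard owns model rows [lower_bound, upper_bound) and silently
// skips indices that fall outside its range.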
template<typename T, typename G>
__global__ void LambGradGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, const T* beta1_t, const T* beta2_t,
const T* scale_by_ptr, const G* model_diff, T* adam_diff, T* model,
T* m, T* v) {
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
LambGradFunctor<T, G>()(beta1_t, beta2_t, model_diff + i, adam_diff + i, model + i, m + i,
v + i, scale, l1, l2, beta1, beta2, epsilon);
}
}
template<typename T>
__global__ void LambUpdateGpu(int64_t n, float weight_decay, const float* learning_rate,
const T* w_norm, const T* g_norm, const T* beta1_t, const T* beta2_t,
const T* adam_diff, T* model) {
const float lr = LambLRFunctor<T>()(*learning_rate, w_norm, g_norm);
CUDA_1D_KERNEL_LOOP(i, n) { LambUpdateFunctor<T>()(lr, weight_decay, adam_diff + i, model + i); }
}
} // namespace
template<typename T, typename G>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* model, T* m, T* v);
};
template<typename T, typename G>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, const float* learning_rate, const T* scale_by_ptr, const G* model_diff,
T* model, T* m, T* v) {
hipLaunchKernelGGL(( AdamUpdateGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
model_diff, model, m, v);
}
template<typename T>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const float16* model_diff, T* model, T* m, T* v);
};
template<typename T>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const float16* model_diff, T* model, T* m, T* v) {
AdamUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
reinterpret_cast<const half*>(model_diff), model, m, v);
}
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename G>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* adam_diff, T* model, T* m, T* v,
T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T, typename G>
void LambUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t,
T* beta2_t) {
hipLaunchKernelGGL(( AdamUpdateBetaTGpu<T>), dim3(1), dim3(1), 0, ctx->cuda_stream(), beta1, beta2, beta1_t, beta2_t);
hipLaunchKernelGGL(( LambGradGpu<T, G>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, beta1, beta2, epsilon, beta1_t, beta2_t, scale_by_ptr, model_diff,
adam_diff, model, m, v);
T* w_norm = norm_buffer;
T* g_norm = norm_buffer + 1;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, w_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, adam_diff, 1, adam_diff, 1, g_norm);
KernelUtil<DeviceType::kGPU, T>::Sqrt(ctx, 2, norm_buffer, norm_buffer);
hipLaunchKernelGGL(( LambUpdateGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, weight_decay, learning_rate, w_norm, g_norm, beta1_t, beta2_t, adam_diff, model);
}
template<typename T>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const float16* model_diff, T* adam_diff, T* model, T* m,
T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T>
void LambUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const float16* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t,
T* beta2_t) {
LambUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
reinterpret_cast<const half*>(model_diff), adam_diff, model, m, v, norm_buffer, beta1_t,
beta2_t);
}
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, float beta1, float beta2, float epsilon, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate, const K* indices,
const T* values, T* model, T* m, T* v);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, float beta1, float beta2, float epsilon, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model, T* m, T* v) {
hipLaunchKernelGGL(( IndexedSlicesAdamUpdateGpu<T, K>)
, dim3(BlocksNum4ThreadsNum(num_instance * feature_size)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->cuda_stream(), beta1, beta2, epsilon, feature_size, lower_bound, upper_bound,
num_unique_instance, learning_rate, indices, values, model, m, v);
}
#define INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesAdamMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU
template<>
struct AdamBiasCorrectionLearningRateKernelUtil<DeviceType::kGPU> {
static void AdamBiasCorrectionLearningRate(DeviceCtx* ctx, float beta1, float beta2,
const float* learning_rate, const int64_t* train_step,
float* out);
};
void AdamBiasCorrectionLearningRateKernelUtil<DeviceType::kGPU>::AdamBiasCorrectionLearningRate(
DeviceCtx* ctx, float beta1, float beta2, const float* learning_rate, const int64_t* train_step,
float* out) {
hipLaunchKernelGGL(( AdamBiasCorrectionLearningRateGpu), dim3(1), dim3(1), 0, ctx->cuda_stream(), beta1, beta2, learning_rate,
train_step, out);
}
namespace {
template<typename T, typename G, bool centered>
__global__ void RmsPropUpdateGpu(int64_t n, T scale, float l1, float l2, T* mean_square,
T* mean_gradient, float epsilon, float weight_decay,
float decay_rate, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* model) {
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
RmsPropUpdateFunctor<T, G, centered>()(model_diff + i, model + i, n, scale, l1, l2,
mean_square + i,
(centered ? mean_gradient + i : nullptr), epsilon,
weight_decay, decay_rate, *learning_rate);
}
}
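// In the centered variant above, mean_gradient keeps a running mean of the gradient
// so the functor can normalize by an estimate of the gradient variance instead of
// the raw second moment; see RmsPropUpdateFunctor in model_update_kernel_util.h
// for the exact recurrence.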
} // namespace
template<typename T, typename G>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate,
const float* learning_rate, const T* scale_by_ptr, const G* model_diff,
T* model, T* mean_square, T* mean_gradient);
};
template<typename T, typename G>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, const float* learning_rate, const T* scale_by_ptr,
const G* model_diff, T* model, T* mean_square, T* mean_gradient) {
if (centered) {
hipLaunchKernelGGL(( RmsPropUpdateGpu<T, G, true>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate, scale_by_ptr, model_diff, model);
} else {
hipLaunchKernelGGL(( RmsPropUpdateGpu<T, G, false>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate, scale_by_ptr, model_diff, model);
}
}
template<typename T>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate,
const float* learning_rate, const T* scale_by_ptr, const float16* model_diff,
T* model, T* mean_square, T* mean_gradient);
};
template<typename T>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, const float* learning_rate, const T* scale_by_ptr,
const float16* model_diff, T* model, T* mean_square, T* mean_gradient) {
RmsPropUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, centered, epsilon, weight_decay, decay_rate, learning_rate,
scale_by_ptr, reinterpret_cast<const half*>(model_diff), model, mean_square, mean_gradient);
}
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float16>;
namespace {
template<typename T, typename G>
__global__ void LarsScaleModelDiffGpu(int64_t n, T scale, float l1, float l2, const T* scale_by_ptr,
const G* model_diff, T* model, T* model_diff_tmp) {
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
model_diff_tmp[i] =
CastScaleRegularizeGradientFunctor<T, G>()(model_diff[i], model[i], scale, l1, l2);
}
}
template<typename T>
__global__ void LarsGetLocalLearningRateGpu(const float* learning_rate, T weight_decay, T epsilon,
T lars_coefficient, const int64_t* train_step,
T* data_tmp) {
T* model_norm = &data_tmp[0];
T* model_diff_norm = &data_tmp[1];
T* local_learning_rate = &data_tmp[2];
*model_norm = std::sqrt(*model_norm);
*model_diff_norm = std::sqrt(*model_diff_norm);
if (*train_step == 0) {
*local_learning_rate =
*learning_rate * lars_coefficient * (*model_norm) / (epsilon + (*model_diff_norm));
} else {
*local_learning_rate = *learning_rate * lars_coefficient * (*model_norm)
/ (epsilon + (*model_diff_norm) + weight_decay * (*model_diff_norm));
}
}
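// Local learning rate computed above:
//   step 0:  local_lr = lr * lars_coefficient * ||w|| / (eps + ||g||)
//   step>0:  local_lr = lr * lars_coefficient * ||w|| / (eps + ||g|| + weight_decay * ||g||)
// with ||w|| and ||g|| the L2 norms accumulated into data_tmp by the caller.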
template<typename T>
__global__ void LarsUpdateGpu(int64_t n, float momentum_beta, T* momentum, float weight_decay,
T* local_learning_rate, T* model_diff_tmp, T* model) {
CUDA_1D_KERNEL_LOOP(i, n) {
LarsUpdateFunctor<T>()(model_diff_tmp + i, model + i, momentum_beta, momentum + i, weight_decay,
*local_learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const int64_t* train_step, const T* scale_by_ptr,
const G* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp);
};
template<typename T, typename G>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate,
const int64_t* train_step, const T* scale_by_ptr, const G* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp) {
hipLaunchKernelGGL(( LarsScaleModelDiffGpu<T, G>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, l1, l2, scale_by_ptr, model_diff, model, model_diff_tmp);
T* model_norm = data_tmp;
T* model_diff_norm = data_tmp + 1;
T* local_learning_rate = data_tmp + 2;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, model_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model_diff_tmp, 1, model_diff_tmp, 1,
model_diff_norm);
hipLaunchKernelGGL(( LarsGetLocalLearningRateGpu<T>), dim3(1), dim3(1), 0, ctx->cuda_stream(),
learning_rate, weight_decay, epsilon, lars_coefficient, train_step, data_tmp);
hipLaunchKernelGGL(( LarsUpdateGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, momentum_beta, momentum, weight_decay, local_learning_rate, model_diff_tmp, model);
}
template<typename T>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const int64_t* train_step, const T* scale_by_ptr,
const float16* model_diff, T* model, T* momentum, T* data_tmp,
T* model_diff_tmp);
};
template<typename T>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate,
const int64_t* train_step, const T* scale_by_ptr, const float16* model_diff, T* model,
T* momentum, T* data_tmp, T* model_diff_tmp) {
LarsUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, momentum_beta, epsilon, lars_coefficient, weight_decay, learning_rate,
train_step, scale_by_ptr, reinterpret_cast<const half*>(model_diff), model, momentum,
data_tmp, model_diff_tmp);
}
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float16>;
} // namespace oneflow
| baf796a2fc2b8246ae4af03010c1b906b18207a1.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/kernel_util.cuh"
#include "oneflow/user/kernels/model_update_kernel_util.h"
namespace oneflow {
namespace {
template<typename T, typename G>
__global__ void SGDUpdateGpu(int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const G* model_diff,
T* model) {
const T lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
SGDUpdateFunctor<T, G>()(model_diff + i, model + i, scale, l1, l2, weight_decay, lr);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesSGDUpdateGpu(const IDX data_elem_cnt, const K* indices,
const T* values, const float* learning_rate,
const IDX num_features, const IDX feature_size, T* model,
const IDX feature_id_offset) {
const T minus_lr = -*learning_rate;
CUDA_1D_KERNEL_LOOP_T(IDX, i, data_elem_cnt) {
const T val = values[i];
if (val != static_cast<T>(0)) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX feature_id = indices[indices_idx];
assert(feature_id >= 0);
const IDX local_feature_id = feature_id - feature_id_offset;
if (local_feature_id >= 0 && local_feature_id < num_features) {
const IDX update_offset = local_feature_id * feature_size + inner_idx;
gpu_atomic_add(model + update_offset, val * minus_lr);
}
}
}
}
} // namespace
template<typename T, typename G>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const G* model_diff,
T* model);
};
template<typename T, typename G>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(DeviceCtx* ctx, int64_t n, T scale,
float l1, float l2, float weight_decay,
const float* learning_rate,
const T* scale_by_ptr, const G* model_diff,
T* model) {
SGDUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, weight_decay, learning_rate, scale_by_ptr, model_diff, model);
}
template<typename T>
struct SGDUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const float16* model_diff,
T* model);
};
template<typename T>
void SGDUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const float16* model_diff, T* model) {
SGDUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, weight_decay, learning_rate, scale_by_ptr,
reinterpret_cast<const half*>(model_diff), model);
}
template struct SGDUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct SGDUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K>
struct IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K> {
static void Update(DeviceCtx* ctx, int64_t num_indices, int64_t num_features,
int64_t feature_size, int64_t feature_id_offset, const float* learning_rate,
const K* indices, const T* values, T* model);
};
template<typename T, typename K>
void IndexedSlicesSGDUpdateKernelUtil<DeviceType::kGPU, T, K>::Update(
DeviceCtx* ctx, int64_t num_indices, int64_t num_features, int64_t feature_size,
int64_t feature_id_offset, const float* learning_rate, const K* indices, const T* values,
T* model) {
const int64_t values_elem_cnt = num_indices * feature_size;
IndexedSlicesSGDUpdateGpu<T, K, int64_t>
<<<BlocksNum4ThreadsNum(values_elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
values_elem_cnt, indices, values, learning_rate, num_features, feature_size, model,
feature_id_offset);
}
#define INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU(in_type_pair, index_type_pair) \
template struct IndexedSlicesSGDUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ);
#undef INITIATE_INDEXED_SLICES_SGD_UPDATE_KERNEL_UTIL_GPU
namespace {
template<typename T, typename G>
__global__ void MomentumUpdateGpu(int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* model,
T* momentum) {
const T lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
MomentumUpdateFunctor<T, G>()(model_diff + i, model + i, momentum + i, scale, l1, l2, beta,
weight_decay, lr);
}
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesMomentumUpdateGpu(T beta, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* momentum) {
const int64_t n = *num_unique_instance * feature_size;
const T lr = *learning_rate;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
MomentumUpdateFunctor<T, T>()(values + i, model + model_idx, momentum + model_idx,
static_cast<T>(1), 0.0, 0.0, beta, 0.0, lr);
}
}
}
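// Editorial note (not in the original source): the indexed-slices kernels in this file
// receive indices that may refer to rows held by other model shards, so an update is
// applied only when instance_id falls inside [lower_bound, upper_bound); the row is then
// rebased to local storage via (instance_id - lower_bound) * feature_size, and
// out-of-range indices are silently skipped.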
} // namespace
template<typename T, typename G>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const G* model_diff, T* model, T* momentum);
};
template<typename T, typename G>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const G* model_diff, T* model, T* momentum) {
MomentumUpdateGpu<T, G>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, beta, weight_decay, learning_rate, scale_by_ptr, model_diff, model,
momentum);
}
template<typename T>
struct MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const float16* model_diff, T* model, T* momentum);
};
template<typename T>
void MomentumUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta, float weight_decay,
const float* learning_rate, const T* scale_by_ptr, const float16* model_diff, T* model,
T* momentum) {
MomentumUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta, weight_decay, learning_rate, scale_by_ptr,
reinterpret_cast<const half*>(model_diff), model, momentum);
}
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct MomentumUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, T beta, int64_t num_instance, int64_t feature_size,
int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model,
T* momentum);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesMomentumMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, T beta, int64_t num_instance, int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance, const float* learning_rate,
const K* indices, const T* values, T* model, T* momentum) {
IndexedSlicesMomentumUpdateGpu<T, K>
<<<BlocksNum4ThreadsNum(num_instance * feature_size), kCudaThreadsNumPerBlock, 0,
ctx->cuda_stream()>>>(beta, feature_size, lower_bound, upper_bound, num_unique_instance,
learning_rate, indices, values, model, momentum);
}
#define INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU( \
val_type_pair, key_type_pair, idx_type_pair) \
template struct IndexedSlicesMomentumMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_MOMENTUM_MODEL_UPDATE_KERNEL_UTIL_GPU
namespace {
__global__ void AdamBiasCorrectionLearningRateGpu(float beta1, float beta2,
const float* learning_rate,
const int64_t* train_step, float* out) {
const auto exponent = static_cast<double>(*train_step + 1);
const float beta1_power = static_cast<float>(pow(beta1, exponent));
const float beta2_power = static_cast<float>(pow(beta2, exponent));
*out = *learning_rate * sqrt(1 - beta2_power) / (1 - beta1_power);
}
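// Illustrative arithmetic for the bias correction above (editorial, not original source):
// with beta1 = 0.9, beta2 = 0.999 and *train_step == 0 the exponent is 1, so
// beta1_power = 0.9 and beta2_power = 0.999, giving
//   *out = lr * sqrt(1 - 0.999) / (1 - 0.9) ≈ lr * 0.0316 / 0.1 ≈ 0.316 * lr;
// as train_step grows both powers decay toward 0 and *out approaches lr.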
template<typename T, typename G>
__global__ void AdamUpdateGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* model, T* m, T* v) {
const float lr = *learning_rate;
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
AdamUpdateFunctor<T, G>()(model_diff + i, model + i, m + i, v + i, scale, l1, l2, beta1, beta2,
epsilon, weight_decay, lr);
}
}
template<typename T>
__global__ void AdamUpdateBetaTGpu(const T beta1, const T beta2, T* beta1_t, T* beta2_t) {
*beta1_t *= beta1;
*beta2_t *= beta2;
}
template<typename T, typename K, typename IDX>
__global__ void IndexedSlicesAdamUpdateGpu(float beta1, float beta2, float epsilon,
int64_t feature_size, int64_t lower_bound,
int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices,
const T* values, T* model, T* m, T* v) {
const float lr = *learning_rate;
const int64_t n = *num_unique_instance * feature_size;
CUDA_1D_KERNEL_LOOP(i, n) {
const IDX indices_idx = i / feature_size;
const IDX inner_idx = i - indices_idx * feature_size;
const IDX instance_id = indices[indices_idx];
if (instance_id >= lower_bound && instance_id < upper_bound) {
const IDX model_idx = (instance_id - lower_bound) * feature_size + inner_idx;
AdamUpdateFunctor<T, T>()(values + i, model + model_idx, m + model_idx, v + model_idx,
static_cast<T>(1), 0, 0, beta1, beta2, epsilon, 0, lr);
}
}
}
template<typename T, typename G>
__global__ void LambGradGpu(int64_t n, T scale, float l1, float l2, float beta1, float beta2,
float epsilon, const T* beta1_t, const T* beta2_t,
const T* scale_by_ptr, const G* model_diff, T* adam_diff, T* model,
T* m, T* v) {
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
LambGradFunctor<T, G>()(beta1_t, beta2_t, model_diff + i, adam_diff + i, model + i, m + i,
v + i, scale, l1, l2, beta1, beta2, epsilon);
}
}
template<typename T>
__global__ void LambUpdateGpu(int64_t n, float weight_decay, const float* learning_rate,
const T* w_norm, const T* g_norm, const T* beta1_t, const T* beta2_t,
const T* adam_diff, T* model) {
const float lr = LambLRFunctor<T>()(*learning_rate, w_norm, g_norm);
CUDA_1D_KERNEL_LOOP(i, n) { LambUpdateFunctor<T>()(lr, weight_decay, adam_diff + i, model + i); }
}
} // namespace
template<typename T, typename G>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* model, T* m, T* v);
};
template<typename T, typename G>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, const float* learning_rate, const T* scale_by_ptr, const G* model_diff,
T* model, T* m, T* v) {
AdamUpdateGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
model_diff, model, m, v);
}
template<typename T>
struct AdamUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const float16* model_diff, T* model, T* m, T* v);
};
template<typename T>
void AdamUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float beta1, float beta2, float epsilon,
float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const float16* model_diff, T* model, T* m, T* v) {
AdamUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
reinterpret_cast<const half*>(model_diff), model, m, v);
}
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct AdamUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename G>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* adam_diff, T* model, T* m, T* v,
T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T, typename G>
void LambUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const G* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t,
T* beta2_t) {
AdamUpdateBetaTGpu<T><<<1, 1, 0, ctx->cuda_stream()>>>(beta1, beta2, beta1_t, beta2_t);
LambGradGpu<T, G><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, beta1, beta2, epsilon, beta1_t, beta2_t, scale_by_ptr, model_diff,
adam_diff, model, m, v);
T* w_norm = norm_buffer;
T* g_norm = norm_buffer + 1;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, w_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, adam_diff, 1, adam_diff, 1, g_norm);
KernelUtil<DeviceType::kGPU, T>::Sqrt(ctx, 2, norm_buffer, norm_buffer);
LambUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, weight_decay, learning_rate, w_norm, g_norm, beta1_t, beta2_t, adam_diff, model);
}
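// Editorial reading of the sequence above (not original source): after the two Dot calls
// norm_buffer holds {dot(model, model), dot(adam_diff, adam_diff)}; Sqrt turns both
// entries into w_norm and g_norm in place, and LambLRFunctor inside LambUpdateGpu is
// expected to form the LAMB trust ratio, roughly
//   local_lr = *learning_rate * w_norm / g_norm
// with guards for zero norms; the exact guard logic lives in the functor header and is
// only assumed here.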
template<typename T>
struct LambUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1,
float beta2, float epsilon, float weight_decay, const float* learning_rate,
const T* scale_by_ptr, const float16* model_diff, T* adam_diff, T* model, T* m,
T* v, T* norm_buffer, T* beta1_t, T* beta2_t);
};
template<typename T>
void LambUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, float scale, float l1, float l2, float beta1, float beta2,
float epsilon, float weight_decay, const float* learning_rate, const T* scale_by_ptr,
const float16* model_diff, T* adam_diff, T* model, T* m, T* v, T* norm_buffer, T* beta1_t,
T* beta2_t) {
LambUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, beta1, beta2, epsilon, weight_decay, learning_rate, scale_by_ptr,
reinterpret_cast<const half*>(model_diff), adam_diff, model, m, v, norm_buffer, beta1_t,
beta2_t);
}
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LambUpdateKernelUtil<DeviceType::kGPU, float, float16>;
template<typename T, typename K, typename IDX>
struct IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX> {
static void Update(DeviceCtx* ctx, float beta1, float beta2, float epsilon, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound,
const IDX* num_unique_instance, const float* learning_rate, const K* indices,
const T* values, T* model, T* m, T* v);
};
template<typename T, typename K, typename IDX>
void IndexedSlicesAdamMdUpdateKernelUtil<DeviceType::kGPU, T, K, IDX>::Update(
DeviceCtx* ctx, float beta1, float beta2, float epsilon, int64_t num_instance,
int64_t feature_size, int64_t lower_bound, int64_t upper_bound, const IDX* num_unique_instance,
const float* learning_rate, const K* indices, const T* values, T* model, T* m, T* v) {
IndexedSlicesAdamUpdateGpu<T, K>
<<<BlocksNum4ThreadsNum(num_instance * feature_size), kCudaThreadsNumPerBlock, 0,
ctx->cuda_stream()>>>(beta1, beta2, epsilon, feature_size, lower_bound, upper_bound,
num_unique_instance, learning_rate, indices, values, model, m, v);
}
#define INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU(val_type_pair, key_type_pair, \
idx_type_pair) \
template struct IndexedSlicesAdamMdUpdateKernelUtil< \
DeviceType::kGPU, OF_PP_PAIR_FIRST(val_type_pair), OF_PP_PAIR_FIRST(key_type_pair), \
OF_PP_PAIR_FIRST(idx_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU,
FLOATING_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ, INT_DATA_TYPE_SEQ);
#undef INSTANTIATE_INDEXED_SLICES_ADAM_MODEL_UPDATE_KERNEL_UTIL_GPU
template<>
struct AdamBiasCorrectionLearningRateKernelUtil<DeviceType::kGPU> {
static void AdamBiasCorrectionLearningRate(DeviceCtx* ctx, float beta1, float beta2,
const float* learning_rate, const int64_t* train_step,
float* out);
};
void AdamBiasCorrectionLearningRateKernelUtil<DeviceType::kGPU>::AdamBiasCorrectionLearningRate(
DeviceCtx* ctx, float beta1, float beta2, const float* learning_rate, const int64_t* train_step,
float* out) {
AdamBiasCorrectionLearningRateGpu<<<1, 1, 0, ctx->cuda_stream()>>>(beta1, beta2, learning_rate,
train_step, out);
}
namespace {
template<typename T, typename G, bool centered>
__global__ void RmsPropUpdateGpu(int64_t n, T scale, float l1, float l2, T* mean_square,
T* mean_gradient, float epsilon, float weight_decay,
float decay_rate, const float* learning_rate,
const T* scale_by_ptr, const G* model_diff, T* model) {
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
RmsPropUpdateFunctor<T, G, centered>()(model_diff + i, model + i, n, scale, l1, l2,
mean_square + i,
(centered ? mean_gradient + i : nullptr), epsilon,
weight_decay, decay_rate, *learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate,
const float* learning_rate, const T* scale_by_ptr, const G* model_diff,
T* model, T* mean_square, T* mean_gradient);
};
template<typename T, typename G>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, const float* learning_rate, const T* scale_by_ptr,
const G* model_diff, T* model, T* mean_square, T* mean_gradient) {
if (centered) {
RmsPropUpdateGpu<T, G, true>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate, scale_by_ptr, model_diff, model);
} else {
RmsPropUpdateGpu<T, G, false>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, mean_square, mean_gradient, epsilon, weight_decay, decay_rate,
learning_rate, scale_by_ptr, model_diff, model);
}
}
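// Editorial note (not original source): `centered` is a template parameter rather than a
// runtime argument, presumably so the mean_gradient branch inside RmsPropUpdateFunctor is
// resolved at compile time; the host wrapper above therefore dispatches to one of the two
// kernel instantiations.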
template<typename T>
struct RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered,
float epsilon, float weight_decay, float decay_rate,
const float* learning_rate, const T* scale_by_ptr, const float16* model_diff,
T* model, T* mean_square, T* mean_gradient);
};
template<typename T>
void RmsPropUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, bool centered, float epsilon,
float weight_decay, float decay_rate, const float* learning_rate, const T* scale_by_ptr,
const float16* model_diff, T* model, T* mean_square, T* mean_gradient) {
RmsPropUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, centered, epsilon, weight_decay, decay_rate, learning_rate,
scale_by_ptr, reinterpret_cast<const half*>(model_diff), model, mean_square, mean_gradient);
}
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct RmsPropUpdateKernelUtil<DeviceType::kGPU, float, float16>;
namespace {
template<typename T, typename G>
__global__ void LarsScaleModelDiffGpu(int64_t n, T scale, float l1, float l2, const T* scale_by_ptr,
const G* model_diff, T* model, T* model_diff_tmp) {
if (scale_by_ptr != nullptr) { scale *= *scale_by_ptr; }
CUDA_1D_KERNEL_LOOP(i, n) {
model_diff_tmp[i] =
CastScaleRegularizeGradientFunctor<T, G>()(model_diff[i], model[i], scale, l1, l2);
}
}
template<typename T>
__global__ void LarsGetLocalLearningRateGpu(const float* learning_rate, T weight_decay, T epsilon,
T lars_coefficient, const int64_t* train_step,
T* data_tmp) {
T* model_norm = &data_tmp[0];
T* model_diff_norm = &data_tmp[1];
T* local_learning_rate = &data_tmp[2];
*model_norm = std::sqrt(*model_norm);
*model_diff_norm = std::sqrt(*model_diff_norm);
if (*train_step == 0) {
*local_learning_rate =
*learning_rate * lars_coefficient * (*model_norm) / (epsilon + (*model_diff_norm));
} else {
*local_learning_rate = *learning_rate * lars_coefficient * (*model_norm)
/ (epsilon + (*model_diff_norm) + weight_decay * (*model_diff_norm));
}
}
template<typename T>
__global__ void LarsUpdateGpu(int64_t n, float momentum_beta, T* momentum, float weight_decay,
T* local_learning_rate, T* model_diff_tmp, T* model) {
CUDA_1D_KERNEL_LOOP(i, n) {
LarsUpdateFunctor<T>()(model_diff_tmp + i, model + i, momentum_beta, momentum + i, weight_decay,
*local_learning_rate);
}
}
} // namespace
template<typename T, typename G>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, G> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const int64_t* train_step, const T* scale_by_ptr,
const G* model_diff, T* model, T* momentum, T* data_tmp, T* model_diff_tmp);
};
template<typename T, typename G>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, G>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate,
const int64_t* train_step, const T* scale_by_ptr, const G* model_diff, T* model, T* momentum,
T* data_tmp, T* model_diff_tmp) {
LarsScaleModelDiffGpu<T, G>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, l1, l2, scale_by_ptr, model_diff, model, model_diff_tmp);
T* model_norm = data_tmp;
T* model_diff_norm = data_tmp + 1;
T* local_learning_rate = data_tmp + 2;
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model, 1, model, 1, model_norm);
KernelUtil<DeviceType::kGPU, T>::Dot(ctx, n, model_diff_tmp, 1, model_diff_tmp, 1,
model_diff_norm);
LarsGetLocalLearningRateGpu<T><<<1, 1, 0, ctx->cuda_stream()>>>(
learning_rate, weight_decay, epsilon, lars_coefficient, train_step, data_tmp);
LarsUpdateGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, momentum_beta, momentum, weight_decay, local_learning_rate, model_diff_tmp, model);
}
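// Editorial sketch of the data flow above (not original source): after the two Dot calls
// data_tmp[0] = dot(model, model) and data_tmp[1] = dot(model_diff_tmp, model_diff_tmp);
// LarsGetLocalLearningRateGpu then takes their square roots in place and writes the LARS
// local learning rate into data_tmp[2], which LarsUpdateGpu dereferences for every
// element. Callers are therefore assumed to provide data_tmp with at least 3 elements and
// model_diff_tmp with n elements.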
template<typename T>
struct LarsUpdateKernelUtil<DeviceType::kGPU, T, float16> {
static void Update(DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta,
float epsilon, float lars_coefficient, float weight_decay,
const float* learning_rate, const int64_t* train_step, const T* scale_by_ptr,
const float16* model_diff, T* model, T* momentum, T* data_tmp,
T* model_diff_tmp);
};
template<typename T>
void LarsUpdateKernelUtil<DeviceType::kGPU, T, float16>::Update(
DeviceCtx* ctx, int64_t n, T scale, float l1, float l2, float momentum_beta, float epsilon,
float lars_coefficient, float weight_decay, const float* learning_rate,
const int64_t* train_step, const T* scale_by_ptr, const float16* model_diff, T* model,
T* momentum, T* data_tmp, T* model_diff_tmp) {
LarsUpdateKernelUtil<DeviceType::kGPU, T, half>::Update(
ctx, n, scale, l1, l2, momentum_beta, epsilon, lars_coefficient, weight_decay, learning_rate,
train_step, scale_by_ptr, reinterpret_cast<const half*>(model_diff), model, momentum,
data_tmp, model_diff_tmp);
}
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, double, double>;
template struct LarsUpdateKernelUtil<DeviceType::kGPU, float, float16>;
} // namespace oneflow
|
dc7d20af22ba169054fe42151fe404528e6ca86b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <raft/handle.hpp>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/path_retrieval.hpp>
namespace cugraph {
namespace detail {
template <typename vertex_t, typename weight_t>
__global__ void get_traversed_cost_kernel(vertex_t const* vertices,
vertex_t const* preds,
vertex_t const* vtx_map,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
for (vertex_t i = threadIdx.x + blockIdx.x * blockDim.x; i < num_vertices;
i += gridDim.x * blockDim.x) {
weight_t sum = info_weights[i];
vertex_t pred = preds[i];
while (pred != stop_vertex) {
vertex_t pos = vtx_map[pred];
sum += info_weights[pos];
pred = preds[pos];
}
out[i] = sum;
}
}
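// Editorial walk-through (not original source): for a vertex v whose predecessor chain is
// v -> p1 -> p2 -> stop_vertex, out[i] accumulates info_weights at v, p1 and p2. vtx_map
// is the permutation built on the host (an index sequence stable-sorted by vertex id), so
// vtx_map[pred] recovers a predecessor's row from its id; this assumes vertex ids form a
// permutation of 0..num_vertices-1.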
template <typename vertex_t, typename weight_t>
void get_traversed_cost_impl(raft::handle_t const& handle,
vertex_t const* vertices,
vertex_t const* preds,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
auto stream = handle.get_stream();
vertex_t max_blocks = handle.get_device_properties().maxGridSize[0];
vertex_t max_threads = handle.get_device_properties().maxThreadsPerBlock;
dim3 nthreads, nblocks;
nthreads.x = std::min<vertex_t>(num_vertices, max_threads);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = std::min<vertex_t>((num_vertices + nthreads.x - 1) / nthreads.x, max_blocks);
nblocks.y = 1;
nblocks.z = 1;
rmm::device_uvector<vertex_t> vtx_map_v(num_vertices, stream);
rmm::device_uvector<vertex_t> vtx_keys_v(num_vertices, stream);
vertex_t* vtx_map = vtx_map_v.data();
vertex_t* vtx_keys = vtx_keys_v.data();
raft::copy(vtx_keys, vertices, num_vertices, stream);
thrust::sequence(handle.get_thrust_policy(), vtx_map, vtx_map + num_vertices);
thrust::stable_sort_by_key(
handle.get_thrust_policy(), vtx_keys, vtx_keys + num_vertices, vtx_map);
hipLaunchKernelGGL(( get_traversed_cost_kernel), dim3(nblocks), dim3(nthreads), 0, 0,
vertices, preds, vtx_map, info_weights, out, stop_vertex, num_vertices);
}
} // namespace detail
template <typename vertex_t, typename weight_t>
void get_traversed_cost(raft::handle_t const& handle,
vertex_t const* vertices,
vertex_t const* preds,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
CUGRAPH_EXPECTS(num_vertices > 0, "num_vertices should be strictly positive");
CUGRAPH_EXPECTS(out != nullptr, "out should be of size num_vertices");
cugraph::detail::get_traversed_cost_impl(
handle, vertices, preds, info_weights, out, stop_vertex, num_vertices);
}
template void get_traversed_cost<int32_t, float>(raft::handle_t const& handle,
int32_t const* vertices,
int32_t const* preds,
float const* info_weights,
float* out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int32_t, double>(raft::handle_t const& handle,
int32_t const* vertices,
int32_t const* preds,
double const* info_weights,
double* out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int64_t, float>(raft::handle_t const& handle,
int64_t const* vertices,
int64_t const* preds,
float const* info_weights,
float* out,
int64_t stop_vertex,
int64_t num_vertices);
template void get_traversed_cost<int64_t, double>(raft::handle_t const& handle,
int64_t const* vertices,
int64_t const* preds,
double const* info_weights,
double* out,
int64_t stop_vertex,
int64_t num_vertices);
} // namespace cugraph
| dc7d20af22ba169054fe42151fe404528e6ca86b.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <raft/handle.hpp>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/path_retrieval.hpp>
namespace cugraph {
namespace detail {
template <typename vertex_t, typename weight_t>
__global__ void get_traversed_cost_kernel(vertex_t const* vertices,
vertex_t const* preds,
vertex_t const* vtx_map,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
for (vertex_t i = threadIdx.x + blockIdx.x * blockDim.x; i < num_vertices;
i += gridDim.x * blockDim.x) {
weight_t sum = info_weights[i];
vertex_t pred = preds[i];
while (pred != stop_vertex) {
vertex_t pos = vtx_map[pred];
sum += info_weights[pos];
pred = preds[pos];
}
out[i] = sum;
}
}
template <typename vertex_t, typename weight_t>
void get_traversed_cost_impl(raft::handle_t const& handle,
vertex_t const* vertices,
vertex_t const* preds,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
auto stream = handle.get_stream();
vertex_t max_blocks = handle.get_device_properties().maxGridSize[0];
vertex_t max_threads = handle.get_device_properties().maxThreadsPerBlock;
dim3 nthreads, nblocks;
nthreads.x = std::min<vertex_t>(num_vertices, max_threads);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = std::min<vertex_t>((num_vertices + nthreads.x - 1) / nthreads.x, max_blocks);
nblocks.y = 1;
nblocks.z = 1;
rmm::device_uvector<vertex_t> vtx_map_v(num_vertices, stream);
rmm::device_uvector<vertex_t> vtx_keys_v(num_vertices, stream);
vertex_t* vtx_map = vtx_map_v.data();
vertex_t* vtx_keys = vtx_keys_v.data();
raft::copy(vtx_keys, vertices, num_vertices, stream);
thrust::sequence(handle.get_thrust_policy(), vtx_map, vtx_map + num_vertices);
thrust::stable_sort_by_key(
handle.get_thrust_policy(), vtx_keys, vtx_keys + num_vertices, vtx_map);
get_traversed_cost_kernel<<<nblocks, nthreads>>>(
vertices, preds, vtx_map, info_weights, out, stop_vertex, num_vertices);
}
} // namespace detail
template <typename vertex_t, typename weight_t>
void get_traversed_cost(raft::handle_t const& handle,
vertex_t const* vertices,
vertex_t const* preds,
weight_t const* info_weights,
weight_t* out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
CUGRAPH_EXPECTS(num_vertices > 0, "num_vertices should be strictly positive");
CUGRAPH_EXPECTS(out != nullptr, "out should be of size num_vertices");
cugraph::detail::get_traversed_cost_impl(
handle, vertices, preds, info_weights, out, stop_vertex, num_vertices);
}
template void get_traversed_cost<int32_t, float>(raft::handle_t const& handle,
int32_t const* vertices,
int32_t const* preds,
float const* info_weights,
float* out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int32_t, double>(raft::handle_t const& handle,
int32_t const* vertices,
int32_t const* preds,
double const* info_weights,
double* out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int64_t, float>(raft::handle_t const& handle,
int64_t const* vertices,
int64_t const* preds,
float const* info_weights,
float* out,
int64_t stop_vertex,
int64_t num_vertices);
template void get_traversed_cost<int64_t, double>(raft::handle_t const& handle,
int64_t const* vertices,
int64_t const* preds,
double const* info_weights,
double* out,
int64_t stop_vertex,
int64_t num_vertices);
} // namespace cugraph
|
SpatialSubSampling.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
/*
* Description:
* this function subsamples an input 3D tensor along dimensions 1 and 2
* 3D input, 3D output, 1D weight, 1D bias
*/
template <typename Dtype, typename Acctype>
__global__ void subsample(Dtype *input, Dtype *output, Dtype *weight, Dtype *bias,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
// Get the good mask for (k,i) (k out, i in)
Dtype the_weight = weight[k];
// Initialize to the bias
Dtype the_bias = bias[k];
// For all output pixels...
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Compute the mean of the input image...
Dtype *ptr_input = input + yy*dH*input_w + xx*dW;
Dtype *ptr_output = output + yy*output_w + xx;
Acctype sum = 0;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
sum += ptr_input[kx];
ptr_input += input_w; // next input line
}
// Update output
*ptr_output = ScalarConvert<Acctype, Dtype>::to(the_weight*sum + the_bias);
}
}
}
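/*
 * Editorial example of the output geometry above (not original source): with
 * input_w = input_h = 8, kW = kH = 2 and dW = dH = 2, output_w = (8 - 2) / 2 + 1 = 4
 * and output_h = 4, i.e. each output pixel is the sum over a non-overlapping 2x2 input
 * window, scaled by the per-plane weight and shifted by the per-plane bias.
 */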
/*
* Description:
* this function computes the gradWeight from input and gradOutput
*/
template <typename Dtype, typename Acctype>
__global__ void subgradweight(Dtype *input, Dtype *gradOutput, Dtype *gradWeight, Dtype *gradBias,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW,
float scale)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
input = input + i*input_w*input_h;
// thread ID
int tid = blockDim.x*threadIdx.y + threadIdx.x;
// create array to hold partial sums
__shared__ Acctype sums[CUDA_MAX_THREADS];
sums[tid] = 0;
// compute partial sums
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
Dtype *ptr_input = input + yy*dH*input_w + xx*dW;
Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx;
Dtype z = *ptr_gradOutput;
int64_t kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
sums[tid] += z * ptr_input[kx];
}
ptr_input += input_w;
}
}
}
__syncthreads();
// reduce: accumulate all partial sums to produce final gradWeight
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
Acctype scaledSums = Acctype(0);
for(int i = 0; i < blockDim.x*blockDim.y; i++) {
scaledSums += scale*sums[i];
}
gradWeight[k] += ScalarConvert<Acctype, Dtype>::to(scaledSums);
}
__syncthreads();
// compute gradBias
sums[tid] = 0;
for (int i=tid; i<output_w*output_h; i+=(blockDim.x*blockDim.y)) {
sums[tid] += gradOutput[i];
}
__syncthreads();
// reduce gradBias
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
Acctype scaledSums = Acctype(0);
for (int i=0; i<(blockDim.x*blockDim.y); i++) {
scaledSums += scale*sums[i];
}
gradBias[k] += ScalarConvert<Acctype, Dtype>::to(scaledSums);
}
}
/*
* Description:
* this function computes the gradInput from weight and gradOutput
*/
template <typename Dtype>
__global__ void subgradinput(Dtype *gradInput, Dtype *gradOutput, Dtype *weight,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// get weight
Dtype the_weight = weight[k];
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
Dtype *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx;
Dtype z = *ptr_gradOutput * the_weight;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
// FIXME: should this be done at accreal precision?
ptr_gradInput[kx] += z;
}
ptr_gradInput += input_w;
}
}
}
}
/*
* Description:
* this function computes the gradInput from weight and gradOutput
*/
template <typename Dtype>
__global__ void subgradinputAtomic(Dtype *gradInput, Dtype *gradOutput, Dtype *weight,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// get weight
Dtype the_weight = weight[k];
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
Dtype *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx;
Dtype z = *ptr_gradOutput * the_weight;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
// FIXME: should this be done at accreal precision?
atomicAdd(&(ptr_gradInput[kx]), z);
}
ptr_gradInput += input_w;
}
}
}
}
#include "generic/SpatialSubSampling.cu"
#include "THHGenerateFloatTypes.h"
#undef CUDA_MAX_THREADS
| SpatialSubSampling.cu | #include "THCUNN.h"
#include "THCTensor.hpp"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
/*
* Description:
* this function subsamples an input 3D tensor along dimensions 1 and 2
* 3D input, 3D output, 1D weight, 1D bias
*/
template <typename Dtype, typename Acctype>
__global__ void subsample(Dtype *input, Dtype *output, Dtype *weight, Dtype *bias,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
output = output + o*output_w*output_h;
input = input + i*input_w*input_h;
// Get the good mask for (k,i) (k out, i in)
Dtype the_weight = weight[k];
// Initialize to the bias
Dtype the_bias = bias[k];
// For all output pixels...
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Compute the mean of the input image...
Dtype *ptr_input = input + yy*dH*input_w + xx*dW;
Dtype *ptr_output = output + yy*output_w + xx;
Acctype sum = 0;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++)
sum += ptr_input[kx];
ptr_input += input_w; // next input line
}
// Update output
*ptr_output = ScalarConvert<Acctype, Dtype>::to(the_weight*sum + the_bias);
}
}
}
/*
* Description:
* this function computes the gradWeight from input and gradOutput
*/
template <typename Dtype, typename Acctype>
__global__ void subgradweight(Dtype *input, Dtype *gradOutput, Dtype *gradWeight, Dtype *gradBias,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW,
float scale)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
input = input + i*input_w*input_h;
// thread ID
int tid = blockDim.x*threadIdx.y + threadIdx.x;
// create array to hold partial sums
__shared__ Acctype sums[CUDA_MAX_THREADS];
sums[tid] = 0;
// compute partial sums
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
Dtype *ptr_input = input + yy*dH*input_w + xx*dW;
Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx;
Dtype z = *ptr_gradOutput;
int64_t kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
sums[tid] += z * ptr_input[kx];
}
ptr_input += input_w;
}
}
}
__syncthreads();
// reduce: accumulate all partial sums to produce final gradWeight
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
Acctype scaledSums = Acctype(0);
for(int i = 0; i < blockDim.x*blockDim.y; i++) {
scaledSums += scale*sums[i];
}
gradWeight[k] += ScalarConvert<Acctype, Dtype>::to(scaledSums);
}
__syncthreads();
// compute gradBias
sums[tid] = 0;
for (int i=tid; i<output_w*output_h; i+=(blockDim.x*blockDim.y)) {
sums[tid] += gradOutput[i];
}
__syncthreads();
// reduce gradBias
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
Acctype scaledSums = Acctype(0);
for (int i=0; i<(blockDim.x*blockDim.y); i++) {
scaledSums += scale*sums[i];
}
gradBias[k] += ScalarConvert<Acctype, Dtype>::to(scaledSums);
}
}
/*
* Description:
* this function computes the gradInput from weight and gradOutput
*/
template <typename Dtype>
__global__ void subgradinput(Dtype *gradInput, Dtype *gradOutput, Dtype *weight,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// get weight
Dtype the_weight = weight[k];
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
Dtype *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx;
Dtype z = *ptr_gradOutput * the_weight;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
// FIXME: should this be done at accreal precision?
ptr_gradInput[kx] += z;
}
ptr_gradInput += input_w;
}
}
}
}
/*
* Description:
* this function computes the gradInput from weight and gradOutput
*/
template <typename Dtype>
__global__ void subgradinputAtomic(Dtype *gradInput, Dtype *gradOutput, Dtype *weight,
int input_n, int input_h, int input_w,
int kH, int kW, int dH, int dW)
{
// iterators
int xx, yy;
// output size
int output_w = (input_w - kW) / dW + 1;
int output_h = (input_h - kH) / dH + 1;
// compute offsets based on thread/block ID
int o = blockIdx.x;
int i = o;
int k = blockIdx.x % input_n;
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
// select input/output plane
gradOutput = gradOutput + o*output_w*output_h;
gradInput = gradInput + i*input_w*input_h;
// get weight
Dtype the_weight = weight[k];
// compute gradInput
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
Dtype *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx;
Dtype z = *ptr_gradOutput * the_weight;
int kx, ky;
for(ky = 0; ky < kH; ky++) {
for(kx = 0; kx < kW; kx++) {
// FIXME: should this be done at accreal precision?
atomicAdd(&(ptr_gradInput[kx]), z);
}
ptr_gradInput += input_w;
}
}
}
}
#include "generic/SpatialSubSampling.cu"
#include "THCGenerateFloatTypes.h"
#undef CUDA_MAX_THREADS
|
24071fa446f5d96c031686a5aa1baae8e215611e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
typedef enum {DT_UNDEFINED = 0, DT_INT, DT_FLOAT, DT_STRING, DT_BOOLEAN} DataType;
typedef enum {OP_UNDEFINED = 0, OP_EQUAL_TO, OP_GREATER_THAN, OP_GREATER_THAN_OR_EQUAL_TO, OP_LESS_THAN,
OP_LESS_THAN_OR_EQUAL_TO, OP_LOGICAL_AND, OP_LOGICAL_OR, OP_NOT_EQUAL_TO} Operator;
typedef enum {DL_ERROR = 0, DL_FALSE = 1, DL_TRUE = 2, DL_IGNORE = 3} DLNodeValue;
typedef struct {
DataType type;
int intValue;
float floatValue;
char *stringValue;
int booleanValue;
} Value;
#define OFFSET_SAFETY_MAX 100
__device__ int parseDecisionListNode(char *expression, DLNodeValue *value);
__device__ int parseExpression(char *expression, Value *value);
__device__ int parseBinaryExpression(char *expression, Value *value);
__device__ int parseVariableExpression(char *expression, Value *value);
__device__ int parseBooleanConstant(char *expression, Value *value);
__device__ int parseIntegerConstant(char *expression, Value *value);
__device__ int parseFloatConstant(char *expression, Value *value);
__device__ int parseStringConstant(char *expression, Value *value);
__device__ int parseOperator(char *expression, Operator *op);
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *returnValue);
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value);
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value);
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value);
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value);
__device__ int dstrlen(char *str);
__device__ int dstreql(char *str1, char *str2);
extern "C"
__global__ void processDecisionLists(int numExpressions, char **expressions, int *output)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numExpressions) {
char *expression = expressions[idx];
DLNodeValue dlNodeValue;
int offset = parseDecisionListNode(expression, &dlNodeValue);
output[idx] = dlNodeValue;
}
}
/////////////////////////////////////////////////////////////////////
// PARSING FUNCTIONS
//
// NB: All the parseXXX functions return a value that indicates how far the pointer
// should be advanced. The actual return value is in the parameter list.
__device__ int parseDecisionListNode(char *expression, DLNodeValue *dlNodeValue) {
// Currently there are only two valid formats for a DL node:
// <binary expression> <T|F>
// <boolean constant> <T|F>
// In the latter case, the <boolean constant> must always be T since that represents
// the default node. It's redundant to have a condition that always evaluates to true,
// but we keep it anyway because the code to generate, store and evaluate DL's on the
// Java side is much nicer that way.
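// Illustrative encodings (editorial assumption inferred from the parsers below, not taken
// from the original source): a node testing an integer equality might be encoded as
//   "EB{CI{42}==CI{42}}T"
// i.e. a binary expression over two integer constants followed by the node's T/F value,
// while the default node would look like "CB{T}T".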
int offset = 0;
Value value;
offset += parseExpression(expression, &value);
// Check the return from the expression evaluation. If it's false, then we ignore this
// DL node and move on to the next one (so return IGNORE); if true, then we return the
// node's value.
if (value.type != DT_BOOLEAN) {
*dlNodeValue = DL_ERROR;
return 0;
}
if (value.booleanValue == 0) {
*dlNodeValue = DL_IGNORE;
} else {
char nodeValue = *(expression+offset);
if (nodeValue == 'T')
*dlNodeValue = DL_TRUE;
else if (nodeValue == 'F')
*dlNodeValue = DL_FALSE;
else {
*dlNodeValue = DL_ERROR;
return 0;
}
}
return offset;
}
__device__ int parseExpression(char *expression, Value *value) {
int offset = 0;
char c1 = expression[0];
char c2 = expression[1];
offset += 2;
// NB: This is where you'd plug in the code to evaluate additional kinds of expressions
// if you wanted to expand this kernel to be more generic.
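// For example (hypothetical extension, not present in this kernel), a unary expression
// tagged "EU" could be dispatched here with an extra branch such as
//   else if (c1 == 'E' && c2 == 'U')
//     offset += parseUnaryExpression(expression+offset, value); // parseUnaryExpression would be new code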
if (c1 == 'E' && c2 == 'B')
offset += parseBinaryExpression(expression+offset, value);
else if (c1 == 'E' && c2 == 'V')
offset += parseVariableExpression(expression+offset, value);
else if (c1 == 'C' && c2 == 'B')
offset += parseBooleanConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'I')
offset += parseIntegerConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'F')
offset += parseFloatConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'S')
offset += parseStringConstant(expression+offset, value);
else { // error
value->type = DT_UNDEFINED;
return 0;
}
return offset;
}
__device__ int parseBinaryExpression(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
Value operand1;
Operator op;
Value operand2;
offset += parseExpression(expression+offset, &operand1);
offset += parseOperator(expression+offset, &op);
offset += parseExpression(expression+offset, &operand2);
// Evaluate the binary expression
evaluateBinaryExpression(&operand1, op, &operand2, value);
// Skip over closing }
if (*(expression+offset) != '}') {
value->type = DT_UNDEFINED;
return 0;
}
offset++;
return offset;
}
__device__ int parseVariableExpression(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
char *token = expression+offset;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
offset++;
if (offset == OFFSET_SAFETY_MAX)
return 0;
*(expression+offset) = '\0';
offset++;
// TODO: Look up variable in symbol table.
// Of course, to do that we need to *have* a symbol table, so that's first on the list.
return offset;
}
__device__ int parseBooleanConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
if (*(expression+offset) == 'F') {
value->booleanValue = 0;
value->type = DT_BOOLEAN;
} else if (*(expression+offset) == 'T') {
value->booleanValue = 1;
value->type = DT_BOOLEAN;
} else { // error
value->type = DT_UNDEFINED;
return 0;
}
offset++;
// Skip over closing }
if (*(expression+offset) != '}')
return 0;
offset++;
return offset;
}
__device__ int parseIntegerConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
value->intValue = 0;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
value->intValue = value->intValue * 10 + (*(expression+offset) - '0');
offset++;
}
if (offset == OFFSET_SAFETY_MAX)
return 0;
value->type = DT_INT;
offset++;
return offset;
}
__device__ int parseFloatConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (expression[0] != '{')
return 0;
offset++;
if (*(expression+offset) != '0')
return 0;
offset++;
if (*(expression+offset) != '.')
return 0;
offset++;
value->floatValue = 0;
int divisor = 10;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
value->floatValue = value->floatValue + ((float)(*(expression+offset) - '0'))/divisor;
divisor = divisor * 10;
offset++;
}
if (offset == OFFSET_SAFETY_MAX)
return 0;
value->type = DT_FLOAT;
offset++;
return offset;
}
__device__ int parseStringConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
char *token = expression+offset;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
offset++;
if (offset == OFFSET_SAFETY_MAX)
return 0;
*(expression+offset) = '\0';
offset++;
value->type = DT_STRING;
value->stringValue = token;
return offset;
}
__device__ int parseOperator(char *expression, Operator *op) {
char c1 = expression[0];
char c2 = expression[1];
if (c1 == '=' && c2 == '=')
*op = OP_EQUAL_TO;
else if (c1 == '>' && c2 == '>')
*op = OP_GREATER_THAN;
else if (c1 == '>' && c2 == '=')
*op = OP_GREATER_THAN_OR_EQUAL_TO;
else if (c1 == '<' && c2 == '<')
*op = OP_LESS_THAN;
else if (c1 == '<' && c2 == '=')
*op = OP_LESS_THAN_OR_EQUAL_TO;
else if (c1 == '&' && c2 == '&')
*op = OP_LOGICAL_AND;
else if (c1 == '|' && c2 == '|')
*op = OP_LOGICAL_OR;
else if (c1 == '!' && c2 == '=')
*op = OP_NOT_EQUAL_TO;
else // error
return 0;
return 2;
}
/////////////////////////////////////////////////////////////////////
// EVALUATION FUNCTIONS
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *value) {
// Indicate an error by not setting the type on the return value
value->type = DT_UNDEFINED;
// For now only allowing comparison of the same types
if (operand1->type != operand2->type)
return;
switch (operand1->type) {
case DT_INT:
evaluateIntegerComparison(operand1->intValue, op, operand2->intValue, value);
break;
case DT_FLOAT:
evaluateFloatComparison(operand1->floatValue, op, operand2->floatValue, value);
break;
case DT_STRING:
evaluateStringComparison(operand1->stringValue, op, operand2->stringValue, value);
break;
case DT_BOOLEAN:
evaluateBooleanComparison(operand1->booleanValue, op, operand2->booleanValue, value);
break;
default:
case DT_UNDEFINED:
// do nothing
break;
}
}
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0; // assume comparison is false
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_GREATER_THAN:
if (op1 > op2) bv = 1;
break;
case OP_GREATER_THAN_OR_EQUAL_TO:
if (op1 >= op2) bv = 1;
break;
case OP_LESS_THAN:
if (op1 < op2) bv = 1;
break;
case OP_LESS_THAN_OR_EQUAL_TO:
if (op1 <= op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0; // assume comparison is false
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_GREATER_THAN:
if (op1 > op2) bv = 1;
break;
case OP_GREATER_THAN_OR_EQUAL_TO:
if (op1 >= op2) bv = 1;
break;
case OP_LESS_THAN:
if (op1 < op2) bv = 1;
break;
case OP_LESS_THAN_OR_EQUAL_TO:
if (op1 <= op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_UNDEFINED:
return;
}
value->booleanValue = bv;
}
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value) {
// Because time is short, we'll have to skimp on the string comparisons.
// The greater than and less than operations require a lexical comparison,
// and we don't have access to the standard C library (and thus strcmp()).
// I'm not going to write my own strcmp() function, so equality is the
// only operation we're going to support for now.
value->type = DT_BOOLEAN;
int bv = 0;
switch (op) {
case OP_EQUAL_TO:
if (dstreql(op1, op2) == 1) bv = 1;
break;
case OP_NOT_EQUAL_TO:
if (dstreql(op1, op2) == 0) bv = 1;
break;
default:
case OP_GREATER_THAN:
case OP_GREATER_THAN_OR_EQUAL_TO:
case OP_LESS_THAN:
case OP_LESS_THAN_OR_EQUAL_TO:
case OP_LOGICAL_AND:
case OP_LOGICAL_OR:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0;
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_GREATER_THAN:
case OP_GREATER_THAN_OR_EQUAL_TO:
case OP_LESS_THAN:
case OP_LESS_THAN_OR_EQUAL_TO:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
/////////////////////////////////////////////////////////////////////
// STRING FUNCTIONS
__device__ int dstrlen(char *str) {
int len = 0;
while (*str != '\0') {
str++;
len++;
}
return len;
}
__device__ int dstreql(char *str1, char *str2) {
while (*str1 == *str2 && *str1 != '\0' && *str2 != '\0') {
str1++;
str2++;
}
if (*str1 == '\0' && *str2 == '\0')
return 1;
return 0;
}
| 24071fa446f5d96c031686a5aa1baae8e215611e.cu | typedef enum {DT_UNDEFINED = 0, DT_INT, DT_FLOAT, DT_STRING, DT_BOOLEAN} DataType;
typedef enum {OP_UNDEFINED = 0, OP_EQUAL_TO, OP_GREATER_THAN, OP_GREATER_THAN_OR_EQUAL_TO, OP_LESS_THAN,
OP_LESS_THAN_OR_EQUAL_TO, OP_LOGICAL_AND, OP_LOGICAL_OR, OP_NOT_EQUAL_TO} Operator;
typedef enum {DL_ERROR = 0, DL_FALSE = 1, DL_TRUE = 2, DL_IGNORE = 3} DLNodeValue;
typedef struct {
DataType type;
int intValue;
float floatValue;
char *stringValue;
int booleanValue;
} Value;
#define OFFSET_SAFETY_MAX 100
__device__ int parseDecisionListNode(char *expression, DLNodeValue *value);
__device__ int parseExpression(char *expression, Value *value);
__device__ int parseBinaryExpression(char *expression, Value *value);
__device__ int parseVariableExpression(char *expression, Value *value);
__device__ int parseBooleanConstant(char *expression, Value *value);
__device__ int parseIntegerConstant(char *expression, Value *value);
__device__ int parseFloatConstant(char *expression, Value *value);
__device__ int parseStringConstant(char *expression, Value *value);
__device__ int parseOperator(char *expression, Operator *op);
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *returnValue);
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value);
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value);
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value);
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value);
__device__ int dstrlen(char *str);
__device__ int dstreql(char *str1, char *str2);
extern "C"
__global__ void processDecisionLists(int numExpressions, char **expressions, int *output)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numExpressions) {
char *expression = expressions[idx];
DLNodeValue dlNodeValue;
int offset = parseDecisionListNode(expression, &dlNodeValue);
output[idx] = dlNodeValue;
}
}
/////////////////////////////////////////////////////////////////////
// PARSING FUNCTIONS
//
// NB: All the parseXXX functions return a value that indicates how far the pointer
// should be advanced. The actual return value is in the parameter list.
__device__ int parseDecisionListNode(char *expression, DLNodeValue *dlNodeValue) {
// Currently there are only two valid formats for a DL node:
// <binary expression> <T|F>
// <boolean constant> <T|F>
// In the latter case, the <boolean constant> must always be T since that represents
// the default node. It's redundant to have a condition that always evaluates to true,
// but we keep it anyway because the code to generate, store and evaluate DL's on the
// Java side is much nicer that way.
int offset = 0;
Value value;
offset += parseExpression(expression, &value);
// Check the return from the expression evaluation. If it's false, then we ignore this
// DL node and move on to the next one (so return IGNORE); if true, then we return the
// node's value.
if (value.type != DT_BOOLEAN) {
*dlNodeValue = DL_ERROR;
return 0;
}
if (value.booleanValue == 0) {
*dlNodeValue = DL_IGNORE;
} else {
char nodeValue = *(expression+offset);
if (nodeValue == 'T')
*dlNodeValue = DL_TRUE;
else if (nodeValue == 'F')
*dlNodeValue = DL_FALSE;
else {
*dlNodeValue = DL_ERROR;
return 0;
}
}
return offset;
}
__device__ int parseExpression(char *expression, Value *value) {
int offset = 0;
char c1 = expression[0];
char c2 = expression[1];
offset += 2;
// NB: This is where you'd plug in the code to evaluate additional kinds of expressions
// if you wanted to expand this kernel to be more generic.
if (c1 == 'E' && c2 == 'B')
offset += parseBinaryExpression(expression+offset, value);
else if (c1 == 'E' && c2 == 'V')
offset += parseVariableExpression(expression+offset, value);
else if (c1 == 'C' && c2 == 'B')
offset += parseBooleanConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'I')
offset += parseIntegerConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'F')
offset += parseFloatConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'S')
offset += parseStringConstant(expression+offset, value);
else { // error
value->type = DT_UNDEFINED;
return 0;
}
return offset;
}
__device__ int parseBinaryExpression(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
Value operand1;
Operator op;
Value operand2;
offset += parseExpression(expression+offset, &operand1);
offset += parseOperator(expression+offset, &op);
offset += parseExpression(expression+offset, &operand2);
// Evaluate the binary expression
evaluateBinaryExpression(&operand1, op, &operand2, value);
// Skip over closing }
if (*(expression+offset) != '}') {
value->type = DT_UNDEFINED;
return 0;
}
offset++;
return offset;
}
__device__ int parseVariableExpression(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
char *token = expression+offset;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
offset++;
if (offset == OFFSET_SAFETY_MAX)
return 0;
*(expression+offset) = '\0';
offset++;
// TODO: Look up variable in symbol table.
// Of course, to do that we need to *have* a symbol table, so that's first on the list.
return offset;
}
__device__ int parseBooleanConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
if (*(expression+offset) == 'F') {
value->booleanValue = 0;
value->type = DT_BOOLEAN;
} else if (*(expression+offset) == 'T') {
value->booleanValue = 1;
value->type = DT_BOOLEAN;
} else { // error
value->type = DT_UNDEFINED;
return 0;
}
offset++;
// Skip over closing }
if (*(expression+offset) != '}')
return 0;
offset++;
return offset;
}
__device__ int parseIntegerConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
value->intValue = 0;
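// Accumulate decimal digits until the closing '}'; negative values and overflow are not handled.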
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
value->intValue = value->intValue * 10 + (*(expression+offset) - '0');
offset++;
}
if (offset == OFFSET_SAFETY_MAX)
return 0;
value->type = DT_INT;
offset++;
return offset;
}
__device__ int parseFloatConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (expression[0] != '{')
return 0;
offset++;
if (*(expression+offset) != '0')
return 0;
offset++;
if (*(expression+offset) != '.')
return 0;
offset++;
value->floatValue = 0;
int divisor = 10;
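// Only fractional constants of the form 0.ddd are supported; each digit contributes digit/10^k.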
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
value->floatValue = value->floatValue + ((float)(*(expression+offset) - '0'))/divisor;
divisor = divisor * 10;
offset++;
}
if (offset == OFFSET_SAFETY_MAX)
return 0;
value->type = DT_FLOAT;
offset++;
return offset;
}
__device__ int parseStringConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
char *token = expression+offset;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
offset++;
if (offset == OFFSET_SAFETY_MAX)
return 0;
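// Replace the closing '}' with a terminator so stringValue can be used as a C string in place.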
*(expression+offset) = '\0';
offset++;
value->type = DT_STRING;
value->stringValue = token;
return offset;
}
__device__ int parseOperator(char *expression, Operator *op) {
char c1 = expression[0];
char c2 = expression[1];
if (c1 == '=' && c2 == '=')
*op = OP_EQUAL_TO;
else if (c1 == '>' && c2 == '>')
*op = OP_GREATER_THAN;
else if (c1 == '>' && c2 == '=')
*op = OP_GREATER_THAN_OR_EQUAL_TO;
else if (c1 == '<' && c2 == '<')
*op = OP_LESS_THAN;
else if (c1 == '<' && c2 == '=')
*op = OP_LESS_THAN_OR_EQUAL_TO;
else if (c1 == '&' && c2 == '&')
*op = OP_LOGICAL_AND;
else if (c1 == '|' && c2 == '|')
*op = OP_LOGICAL_OR;
else if (c1 == '!' && c2 == '=')
*op = OP_NOT_EQUAL_TO;
else // error
return 0;
return 2;
}
/////////////////////////////////////////////////////////////////////
// EVALUATION FUNCTIONS
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *value) {
// Indicate an error by not setting the type on the return value
value->type = DT_UNDEFINED;
// For now only allowing comparison of the same types
if (operand1->type != operand2->type)
return;
switch (operand1->type) {
case DT_INT:
evaluateIntegerComparison(operand1->intValue, op, operand2->intValue, value);
break;
case DT_FLOAT:
evaluateFloatComparison(operand1->floatValue, op, operand2->floatValue, value);
break;
case DT_STRING:
evaluateStringComparison(operand1->stringValue, op, operand2->stringValue, value);
break;
case DT_BOOLEAN:
evaluateBooleanComparison(operand1->booleanValue, op, operand2->booleanValue, value);
break;
default:
case DT_UNDEFINED:
// do nothing
break;
}
}
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0; // assume comparison is false
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_GREATER_THAN:
if (op1 > op2) bv = 1;
break;
case OP_GREATER_THAN_OR_EQUAL_TO:
if (op1 >= op2) bv = 1;
break;
case OP_LESS_THAN:
if (op1 < op2) bv = 1;
break;
case OP_LESS_THAN_OR_EQUAL_TO:
if (op1 <= op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0; // assume comparison is false
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_GREATER_THAN:
if (op1 > op2) bv = 1;
break;
case OP_GREATER_THAN_OR_EQUAL_TO:
if (op1 >= op2) bv = 1;
break;
case OP_LESS_THAN:
if (op1 < op2) bv = 1;
break;
case OP_LESS_THAN_OR_EQUAL_TO:
if (op1 <= op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_UNDEFINED:
return;
}
value->booleanValue = bv;
}
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value) {
// Because time is short, we'll have to skimp on the string comparisons.
// The greater-than and less-than operations require a lexical comparison,
// and we don't have access to the standard C library (and thus strcmp()).
// I'm not going to write my own strcmp() function, so equality is the
// only operation we're going to support for now.
value->type = DT_BOOLEAN;
int bv = 0;
switch (op) {
case OP_EQUAL_TO:
if (dstreql(op1, op2) == 1) bv = 1;
break;
case OP_NOT_EQUAL_TO:
if (dstreql(op1, op2) == 0) bv = 1;
break;
default:
case OP_GREATER_THAN:
case OP_GREATER_THAN_OR_EQUAL_TO:
case OP_LESS_THAN:
case OP_LESS_THAN_OR_EQUAL_TO:
case OP_LOGICAL_AND:
case OP_LOGICAL_OR:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0;
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_GREATER_THAN:
case OP_GREATER_THAN_OR_EQUAL_TO:
case OP_LESS_THAN:
case OP_LESS_THAN_OR_EQUAL_TO:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
/////////////////////////////////////////////////////////////////////
// STRING FUNCTIONS
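// Minimal device-side string helpers; the C runtime's strlen/strcmp are not available in device code.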
__device__ int dstrlen(char *str) {
int len = 0;
while (*str != '\0') {
str++;
len++;
}
return len;
}
__device__ int dstreql(char *str1, char *str2) {
while (*str1 == *str2 && *str1 != '\0' && *str2 != '\0') {
str1++;
str2++;
}
if (*str1 == '\0' && *str2 == '\0')
return 1;
return 0;
}
|
d6c9a91bf8281a0a4a3fd2a78c6a2b60a02c5224.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
***********************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#define MAX_THREADS_PER_BLOCK 512
#include "../../common/polybenchUtilFuncts.h"
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
#include "kernel.hip"
#include "kernel2.cu"
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
double t_start, t_end;
no_of_nodes=0;
edge_list_size=0;
t_start = rtclock();
BFSGraph( argc, argv);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initialize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
bool* d_updating_graph_mask;
hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
hipMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
// allocate mem for the result on host side
int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int* d_cost;
hipMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes);
hipMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
hipMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
//Call the kernel until all the elements of the frontier are false
do
{
//if no thread changes this value then the loop stops
stop=false;
hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ;
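//One BFS level per iteration: the kernels expand the current frontier and set d_over on the device if a new frontier was produced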
hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
// check if kernel execution generated an error
hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated an error
hipMemcpy( &stop, d_over, sizeof(bool), hipMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
hipMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
hipFree(d_graph_nodes);
hipFree(d_graph_edges);
hipFree(d_graph_mask);
hipFree(d_updating_graph_mask);
hipFree(d_graph_visited);
hipFree(d_cost);
}
| d6c9a91bf8281a0a4a3fd2a78c6a2b60a02c5224.cu | /***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
***********************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#define MAX_THREADS_PER_BLOCK 512
#include "../../common/polybenchUtilFuncts.h"
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
#include "kernel.cu"
#include "kernel2.cu"
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
double t_start, t_end;
no_of_nodes=0;
edge_list_size=0;
t_start = rtclock();
BFSGraph( argc, argv);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initialize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=false;
h_updating_graph_mask[i]=false;
h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
h_graph_mask[source]=true;
h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
cudaMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
bool* d_updating_graph_mask;
cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
// allocate mem for the result on host side
int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int* d_cost;
cudaMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes);
cudaMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
cudaMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
//Call the kernel until all the elements of the frontier are false
do
{
//if no thread changes this value then the loop stops
stop=false;
cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ;
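//One BFS level per iteration: the kernels expand the current frontier and set d_over on the device if a new frontier was produced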
Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
// check if kernel execution generated an error
Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated an error
cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
cudaMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
cudaFree(d_graph_nodes);
cudaFree(d_graph_edges);
cudaFree(d_graph_mask);
cudaFree(d_updating_graph_mask);
cudaFree(d_graph_visited);
cudaFree(d_cost);
}
|
4fa2f2e955c3f306de6bf0b669e5beb4739efa02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <assert.h>
// Max-min normalization of input data with m samples and d features
void min_max_normalize(double *xs, int m, int d)
{
for (int x = 0; x < d; ++x) {
// find the min and max of each column
double min = xs[x*d + 0];
double max = xs[x*d + 0];
for (int y = d; y < m*d; ++y) {
double val = xs[x*d + y];
if (val < min) {
min = val;
} else if (val > max) {
max = val;
}
}
for (int y = 0; y < m*d; ++y) {
double val = xs[x*d + y];
xs[x*d + y] = (val - min) / (max-min);
}
}
}
// GPU function for calculating the hypothesis function and individual gradient update for each feature of each sample
__global__ void map(int m, double *xs, double *ys, double *params, double *gradvec, int d){ // m is the no. of samples and d is the number of features in xs(input data)
//double *h;
//hipMalloc (&h, m*sizeof(float));
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<m){
double accum = params[0];
//double accum = 0.0;
for (int j=0; j<d; j++){
accum += xs[index*(d-1)+j] * params[j+1];
}
double h = 1.0/ (1.0 + exp(-accum));
gradvec[index*d+0] = (h - ys[index]) * 1;
for (int j = 1; j < d; j++){
gradvec[index*d+j] = (h - ys[index]) * xs[index*d+j];
}
}
}
#define WARPSIZE 32
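// Software atomicAdd for doubles: emulated with an atomicCAS loop for devices without native double-precision atomicAdd.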
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
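// Rotating-shuffle reduction: after log2(WARPSIZE) steps every lane of the warp holds the full 32-lane sum.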
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernelj(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for (long idx = index; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE -1)) == 0){
atomicAddDouble(out, sum);
}
return out;
}
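// Reduces an n x length array column by column into outputArrayBody: four columns at a time using double4, then one at a time for any remainder.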
__device__ void deviceReduceArrayKernelj(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernelj(inArray, &outputArrayBody[i], i, n, length);
}
}
// Finds the final gradient by summing up the element-wise gradients columnwise
extern "C"
__global__
void reducegrad(double *gradvec, double * sumgradvec, int m, int d) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < m)
deviceReduceArrayKernelj(gradvec, sumgradvec, d, m);
}
//Updates the weights/parameters based on the gradients
//alpha is the learning rate and lambda is the regularization parameter
void updateweight (double *params, double *sumgradvec, int m, int d, float alpha, float lambda){
for (int i=0; i<d; i++){
params[i] = params[i] - alpha * (sumgradvec[i]) - lambda * alpha * params[i];
}
}
#define num_iterations 2
# include<time.h>
int main(){
clock_t start, end;
double time_used;
//Initialize number of samples and features
int m = 1000;
int d = 401;
//Allocate host memory variables
size_t size1 = m*d*sizeof(double);
size_t size2 = m*sizeof(double);
size_t size3 = d*sizeof(double);
double *xs;
double *ys;
double *params;
double *sumgradvechost;
double *gradvec1;
xs = (double*)malloc(size1);
ys = (double*)malloc(size2);
params = (double*)malloc(size3);
sumgradvechost = (double*)malloc(size3);
gradvec1 = (double*)malloc(size1);
//Read input data from file
FILE *fp, *fp1;
fp = fopen ("input", "r");
if (!fp){
printf ("Unable to open file!");
return 1;
}
for (int i=0; i<m; i++){
for (int j=0; j<d-1; j++){
fscanf(fp, "%lf", &xs[i*(d-1) + j]);
}
fscanf(fp, "%lf", &ys[i]);
}
fclose(fp);
//Initialize weights
for (int i=0; i<d; i++){
params[i] = 0.0;
}
// Print first 10 rows of input data
for (int i=0; i<20; i+=2) {
printf("%lf %lf => %lf \n", xs[i], xs[i+1], ys[i/2]);
}
/* //Max-min normalize input data
min_max_normalize(xs, m, d);
//Print first 10 rows of input data after normalization
for (int i=0; i<20; i+=2) {
printf("%lf %lf => %lf \n", xs[i], xs[i+1], ys[i/2]);
}
*/
//Allocate variables in device memory
double *gpu_params;
double *gpu_xs;
double *gpu_ys;
double *gradvec;
double *sumgradvec;
hipMalloc (&gpu_params, size3);
hipMalloc(&gpu_xs, size1);
hipMalloc(&gpu_ys, size2);
hipMalloc(&gradvec, size1);
hipMalloc(&sumgradvec, size3);
//Copy vectors from host memory to device memory
hipError_t err = hipMemcpy(gpu_xs, xs, size1, hipMemcpyHostToDevice);
if ( hipSuccess != err) {
printf( "Error : %s \n", hipGetErrorString( err ) );
return 1;
}
hipMemcpy(gpu_ys, ys, size2, hipMemcpyHostToDevice);
hipMemcpy(gpu_params, params, size3, hipMemcpyHostToDevice);
for (int i=0; i<num_iterations; i++){
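//Each iteration: compute per-sample gradients on the GPU, reduce them on the GPU, update the weights on the CPU, then copy the new weights back to the GPU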
start = clock();
//Compute hypothesis function and element-wise gradients
hipLaunchKernelGGL(( map), dim3(1000),dim3(1024), 0, 0, m, gpu_xs, gpu_ys, gpu_params, gradvec, d);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
//Copy the element wise gradients from GPU to CPU
//hipMemcpy(gradvec1, gradvec, size1, hipMemcpyDeviceToHost);
//Compute sum of all grad vector in GPU
start = clock();
hipLaunchKernelGGL(( reducegrad), dim3(1000),dim3(1024), 0, 0, gradvec, sumgradvec, m, d);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
start = clock();
//Copy the summed gradient vector from the GPU to the CPU
hipMemcpy (sumgradvechost, sumgradvec, sizeof(double)*d, hipMemcpyDeviceToHost);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
//Update weights on the CPU. The learning rate is 0.001 and the regularization parameter is 10.
start = clock();
updateweight(params, sumgradvechost, m, d, 0.001, 10);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
//Print current learned weights
for (int j=0; j<d; j++){
printf("%lf \t", params[j]); }
printf("\n");
// Copy the updated weights back to the GPU
start = clock();
hipMemcpy (gpu_params, params, sizeof(double) * d, hipMemcpyHostToDevice);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
}
//Compute the predictions on the training data from the developed model
double predict[m];
for (int index=0; index<m; index++){
predict[index] = params[0];
for (int j=0; j<d; j++){
predict[index] += xs[index*(d-1)+j] * params[j+1];
}
}
//Compute the training error as the fraction of misclassified samples
double error = 0.0;
for (int i=0; i<m; i++){
int tmp = 0;
if ((1/( 1 + exp(-predict[i]))) >= 0.5) tmp = 1; else tmp = 0;
if (tmp != ys[i])
error ++;
}
error = error / m;
printf("%lf \n", error);
//Dump the prediction output to a file
fp1 = fopen("output", "w");
for (int i=0; i<m; i++){
fprintf(fp1, "%lf \n", 1 / (1 + exp(-predict[i])));
}
}
| 4fa2f2e955c3f306de6bf0b669e5beb4739efa02.cu | #include<stdio.h>
#include <stdlib.h>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
#include <assert.h>
// Max-min normalization of input data with m samples and d features
void min_max_normalize(double *xs, int m, int d)
{
for (int x = 0; x < d; ++x) {
// find the min and max of each column
double min = xs[x*d + 0];
double max = xs[x*d + 0];
for (int y = d; y < m*d; ++y) {
double val = xs[x*d + y];
if (val < min) {
min = val;
} else if (val > max) {
max = val;
}
}
for (int y = 0; y < m*d; ++y) {
double val = xs[x*d + y];
xs[x*d + y] = (val - min) / (max-min);
}
}
}
// GPU function for calculating the hypothesis function and individual gradient update for each feature of each sample
__global__ void map(int m, double *xs, double *ys, double *params, double *gradvec, int d){ // m is the no. of samples and d is the number of features in xs(input data)
//double *h;
//cudaMalloc (&h, m*sizeof(float));
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<m){
double accum = params[0];
//double accum = 0.0;
for (int j=0; j<d; j++){
accum += xs[index*(d-1)+j] * params[j+1];
}
double h = 1.0/ (1.0 + exp(-accum));
gradvec[index*d+0] = (h - ys[index]) * 1;
for (int j = 1; j < d; j++){
gradvec[index*d+j] = (h - ys[index]) * xs[index*d+j];
}
}
}
#define WARPSIZE 32
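// Software atomicAdd for doubles: emulated with an atomicCAS loop for devices without native double-precision atomicAdd.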
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernelj(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for (long idx = index; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE -1)) == 0){
atomicAddDouble(out, sum);
}
return out;
}
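// Reduces an n x length array column by column into outputArrayBody: four columns at a time using double4, then one at a time for any remainder.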
__device__ void deviceReduceArrayKernelj(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernelj(inArray, &outputArrayBody[i], i, n, length);
}
}
// Finds the final gradient by summing up the element-wise gradients columnwise
extern "C"
__global__
void reducegrad(double *gradvec, double * sumgradvec, int m, int d) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < m)
deviceReduceArrayKernelj(gradvec, sumgradvec, d, m);
}
//Updates the weights/parameters based on the gradients
//alpha is the learning rate and lambda is the regularization parameter
void updateweight (double *params, double *sumgradvec, int m, int d, float alpha, float lambda){
for (int i=0; i<d; i++){
params[i] = params[i] - alpha * (sumgradvec[i]) - lambda * alpha * params[i];
}
}
#define num_iterations 2
# include<time.h>
int main(){
clock_t start, end;
double time_used;
//Initialize number of samples and features
int m = 1000;
int d = 401;
//Allocate host memory variables
size_t size1 = m*d*sizeof(double);
size_t size2 = m*sizeof(double);
size_t size3 = d*sizeof(double);
double *xs;
double *ys;
double *params;
double *sumgradvechost;
double *gradvec1;
xs = (double*)malloc(size1);
ys = (double*)malloc(size2);
params = (double*)malloc(size3);
sumgradvechost = (double*)malloc(size3);
gradvec1 = (double*)malloc(size1);
//Read input data from file
FILE *fp, *fp1;
fp = fopen ("input", "r");
if (!fp){
printf ("Unable to open file!");
return 1;
}
for (int i=0; i<m; i++){
for (int j=0; j<d-1; j++){
fscanf(fp, "%lf", &xs[i*(d-1) + j]);
}
fscanf(fp, "%lf", &ys[i]);
}
fclose(fp);
//Initialize weights
for (int i=0; i<d; i++){
params[i] = 0.0;
}
// Print first 10 rows of input data
for (int i=0; i<20; i+=2) {
printf("%lf %lf => %lf \n", xs[i], xs[i+1], ys[i/2]);
}
/* //Max-min normalize input data
min_max_normalize(xs, m, d);
//Print first 10 rows of input data after normalization
for (int i=0; i<20; i+=2) {
printf("%lf %lf => %lf \n", xs[i], xs[i+1], ys[i/2]);
}
*/
//Allocate variables in device memory
double *gpu_params;
double *gpu_xs;
double *gpu_ys;
double *gradvec;
double *sumgradvec;
cudaMalloc (&gpu_params, size3);
cudaMalloc(&gpu_xs, size1);
cudaMalloc(&gpu_ys, size2);
cudaMalloc(&gradvec, size1);
cudaMalloc(&sumgradvec, size3);
//Copy vectors from host memory to device memory
cudaError err = cudaMemcpy(gpu_xs, xs, size1, cudaMemcpyHostToDevice);
if ( cudaSuccess != err) {
printf( "Error : %s \n", cudaGetErrorString( err ) );
return 1;
}
cudaMemcpy(gpu_ys, ys, size2, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_params, params, size3, cudaMemcpyHostToDevice);
for (int i=0; i<num_iterations; i++){
start = clock();
//Compute hypothesis function and element-wise gradients
map<<<1000,1024>>>(m, gpu_xs, gpu_ys, gpu_params, gradvec, d);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
//Copy the element wise gradients from GPU to CPU
//cudaMemcpy(gradvec1, gradvec, size1, cudaMemcpyDeviceToHost);
//Compute sum of all grad vector in GPU
start = clock();
reducegrad<<<1000,1024>>>(gradvec, sumgradvec, m, d);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
start = clock();
//Copy the summed gradient vector from the GPU to the CPU
cudaMemcpy (sumgradvechost, sumgradvec, sizeof(double)*d, cudaMemcpyDeviceToHost);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
//Update weights on the CPU. The learning rate is 0.001 and the regularization parameter is 10.
start = clock();
updateweight(params, sumgradvechost, m, d, 0.001, 10);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
//Print current learned weights
for (int j=0; j<d; j++){
printf("%lf \t", params[j]); }
printf("\n");
// Copy the updated weights back to the GPU
start = clock();
cudaMemcpy (gpu_params, params, sizeof(double) * d, cudaMemcpyHostToDevice);
end = clock();
time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f \n", time_used);
}
//Compute the predictions on the training data from the developed model
double predict[m];
for (int index=0; index<m; index++){
predict[index] = params[0];
for (int j=0; j<d; j++){
predict[index] += xs[index*(d-1)+j] * params[j+1];
}
}
//Compute the training error as the fraction of misclassified samples
double error = 0.0;
for (int i=0; i<m; i++){
int tmp = 0;
if ((1/( 1 + exp(-predict[i]))) >= 0.5) tmp = 1; else tmp = 0;
if (tmp != ys[i])
error ++;
}
error = error / m;
printf("%lf \n", error);
//Dump the prediction output to a file
fp1 = fopen("output", "w");
for (int i=0; i<m; i++){
fprintf(fp1, "%lf \n", 1 / (1 + exp(-predict[i])));
}
}
|
c5231f9d8900c245106770610ca1bc241e36dd26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/conv_shift_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
namespace {
inline int DivUp(int x, int y) { return (x + y - 1) / y; }
// Some notes on the design:
//
// Each thread is responsible for computing a single output out[k, i].
// Thread blocks are based on tiles of x with height 1 in the batch dimension.
//
// This design is based on the typical use case where the filter
// y is fairly small. For large y, it would probably be more efficient
// to also tile across y.
template <typename T>
__global__ void ConvShiftForward(const T *x,
const T *y,
int x_width,
int y_width,
int y_half_width,
int batch_size,
T *out) {
extern __shared__ T mem[];
int tx = threadIdx.x;
int i = blockIdx.x * blockDim.x + tx; // global x index
int k = blockIdx.y; // batch index
// Check if we are in a boundary block with fewer x's to process than
// blockDim.x.
int num_x =
(blockIdx.x == gridDim.x - 1) ? (x_width % blockDim.x) : blockDim.x;
T *sx = mem;
T *sx_pad = &mem[num_x];
T *sy = &mem[blockDim.x + y_width];
// Collaboratively load y[k, :] and length-y padding of x into shared memory.
int pad_start = blockIdx.x * blockDim.x + num_x + x_width - y_half_width;
for (int j = tx; j < y_width; j += blockDim.x) {
sy[j] = y[k * y_width + j];
sx_pad[j] = x[k * x_width + (pad_start + j) % x_width];
}
// Load a cyclically shifted slice of x into shared memory.
if (tx < num_x) {
int load_i = (i - y_half_width + x_width) % x_width;
sx[tx] = x[k * x_width + load_i];
}
__syncthreads();
if (tx < num_x) {
// Compute dot product of sx[tx:tx + y_width] and sy.
T sum = 0;
for (int j = 0; j < y_width; ++j) {
sum += sx[tx + j] * sy[j];
}
// Save to out[k, i].
out[k * x_width + i] = sum;
}
}
// Compute x gradient - initial naive implementation with atomic add.
template <typename T>
__global__ void ConvShiftGradX(const T *dout,
const T *y,
int x_width,
int y_width,
int y_half_width,
int batch_size,
T *dx) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // x index
int j = blockIdx.y; // y index
int k = blockIdx.z; // batch index
if (i < x_width) {
int index = (i + j - y_half_width + x_width) % x_width;
atomicAdd(&dx[k * x_width + index],
dout[k * x_width + i] * y[k * y_width + j]);
}
}
// Compute y gradient - initial naive implementation with atomic add.
template <typename T>
__global__ void ConvShiftDy(const T *x,
const T *dout,
int x_width,
int y_width,
int y_half_width,
int batch_size,
T *dy) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // x index
int j = blockIdx.y; // y index
int k = blockIdx.z; // batch index
if (i < x_width) {
int index = (i + j - y_half_width + x_width) % x_width;
atomicAdd(&dy[k * y_width + j],
x[k * x_width + index] * dout[k * x_width + i]);
}
}
} // namespace
template <typename T>
class ConvShiftKernel<T, phi::GPUContext> : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const phi::DenseTensor *X = context.Input<phi::DenseTensor>("X");
const phi::DenseTensor *Y = context.Input<phi::DenseTensor>("Y");
phi::DenseTensor *Out = context.Output<phi::DenseTensor>("Out");
const T *x_data = X->data<T>();
const T *y_data = Y->data<T>();
T *out_data = Out->mutable_data<T>(context.GetPlace());
int batch_size = X->dims()[0];
int x_width = X->dims()[1];
int y_width = Y->dims()[1];
int y_half_width = (y_width - 1) / 2;
const int x_per_block = 256;
int num_x_blocks = DivUp(x_width, x_per_block);
int mem_per_block = (x_per_block + 2 * y_width) * sizeof(T);
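// Shared memory per block: the shifted x tile, y_width elements of wrap-around padding, and the y filter.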
dim3 grid_dim(num_x_blocks, batch_size);
auto stream = context.template device_context<phi::GPUContext>().stream();
hipLaunchKernelGGL(( ConvShiftForward<T>), dim3(grid_dim), dim3(x_per_block), mem_per_block, stream,
x_data, y_data, x_width, y_width, y_half_width, batch_size, out_data);
}
};
template <typename T>
class ConvShiftGradKernel<T, phi::GPUContext> : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const phi::DenseTensor *X = context.Input<phi::DenseTensor>("X");
const phi::DenseTensor *Y = context.Input<phi::DenseTensor>("Y");
const phi::DenseTensor *dOut =
context.Input<phi::DenseTensor>(framework::GradVarName("Out"));
const T *x_data = X->data<T>();
const T *y_data = Y->data<T>();
const T *dout_data = dOut->data<T>();
phi::DenseTensor *dX =
context.Output<phi::DenseTensor>(framework::GradVarName("X"));
phi::DenseTensor *dY =
context.Output<phi::DenseTensor>(framework::GradVarName("Y"));
int batch_size = X->dims()[0];
int x_width = X->dims()[1];
int y_width = Y->dims()[1];
int y_half_width = (y_width - 1) / 2;
auto &device_ctx = context.template device_context<phi::GPUContext>();
phi::funcs::SetConstant<phi::GPUContext, T> zero;
const int x_per_block = 256;
int num_x_blocks = DivUp(x_width, x_per_block);
dim3 grid_dim(num_x_blocks, y_width, batch_size);
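// One thread per (x position, filter tap, batch sample) triple; both gradient kernels accumulate into dx/dy with atomicAdd.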
if (dX) {
T *dx_data = dX->mutable_data<T>(context.GetPlace());
zero(device_ctx, dX, static_cast<T>(0.0));
hipLaunchKernelGGL(( ConvShiftGradX<T>)
, dim3(grid_dim), dim3(x_per_block), 0, device_ctx.stream(), dout_data,
y_data,
x_width,
y_width,
y_half_width,
batch_size,
dx_data);
}
if (dY) {
T *dy_data = dY->mutable_data<T>(context.GetPlace());
zero(device_ctx, dY, static_cast<T>(0.0));
hipLaunchKernelGGL(( ConvShiftDy<T>)
, dim3(grid_dim), dim3(x_per_block), 0, device_ctx.stream(), x_data,
dout_data,
x_width,
y_width,
y_half_width,
batch_size,
dy_data);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(
conv_shift, GPU, ALL_LAYOUT, ops::ConvShiftKernel, float) {}
PD_REGISTER_STRUCT_KERNEL(
conv_shift_grad, GPU, ALL_LAYOUT, ops::ConvShiftGradKernel, float) {}
| c5231f9d8900c245106770610ca1bc241e36dd26.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/conv_shift_op.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
namespace {
inline int DivUp(int x, int y) { return (x + y - 1) / y; }
// Some notes on the design:
//
// Each thread is responsible for computing a single output out[k, i].
// Thread blocks are based on tiles of x with height 1 in the batch dimension.
//
// This design is based on the typical use case where the filter
// y is fairly small. For large y, it would probably be more efficient
// to also tile across y.
template <typename T>
__global__ void ConvShiftForward(const T *x,
const T *y,
int x_width,
int y_width,
int y_half_width,
int batch_size,
T *out) {
extern __shared__ T mem[];
int tx = threadIdx.x;
int i = blockIdx.x * blockDim.x + tx; // global x index
int k = blockIdx.y; // batch index
// Check if we are in a boundary block with fewer x's to process than
// blockDim.x.
int num_x =
(blockIdx.x == gridDim.x - 1) ? (x_width % blockDim.x) : blockDim.x;
T *sx = mem;
T *sx_pad = &mem[num_x];
T *sy = &mem[blockDim.x + y_width];
// Collaboratively load y[k, :] and length-y padding of x into shared memory.
int pad_start = blockIdx.x * blockDim.x + num_x + x_width - y_half_width;
for (int j = tx; j < y_width; j += blockDim.x) {
sy[j] = y[k * y_width + j];
sx_pad[j] = x[k * x_width + (pad_start + j) % x_width];
}
// Load a cyclically shifted slice of x into shared memory.
if (tx < num_x) {
int load_i = (i - y_half_width + x_width) % x_width;
sx[tx] = x[k * x_width + load_i];
}
__syncthreads();
if (tx < num_x) {
// Compute dot product of sx[tx:tx + y_width] and sy.
T sum = 0;
for (int j = 0; j < y_width; ++j) {
sum += sx[tx + j] * sy[j];
}
// Save to out[k, i].
out[k * x_width + i] = sum;
}
}
// Compute x gradient - initial naive implementation with atomic add.
template <typename T>
__global__ void ConvShiftGradX(const T *dout,
const T *y,
int x_width,
int y_width,
int y_half_width,
int batch_size,
T *dx) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // x index
int j = blockIdx.y; // y index
int k = blockIdx.z; // batch index
if (i < x_width) {
int index = (i + j - y_half_width + x_width) % x_width;
atomicAdd(&dx[k * x_width + index],
dout[k * x_width + i] * y[k * y_width + j]);
}
}
// Compute y gradient - initial naive implementation with atomic add.
template <typename T>
__global__ void ConvShiftDy(const T *x,
const T *dout,
int x_width,
int y_width,
int y_half_width,
int batch_size,
T *dy) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // x index
int j = blockIdx.y; // y index
int k = blockIdx.z; // batch index
if (i < x_width) {
int index = (i + j - y_half_width + x_width) % x_width;
atomicAdd(&dy[k * y_width + j],
x[k * x_width + index] * dout[k * x_width + i]);
}
}
} // namespace
template <typename T>
class ConvShiftKernel<T, phi::GPUContext> : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const phi::DenseTensor *X = context.Input<phi::DenseTensor>("X");
const phi::DenseTensor *Y = context.Input<phi::DenseTensor>("Y");
phi::DenseTensor *Out = context.Output<phi::DenseTensor>("Out");
const T *x_data = X->data<T>();
const T *y_data = Y->data<T>();
T *out_data = Out->mutable_data<T>(context.GetPlace());
int batch_size = X->dims()[0];
int x_width = X->dims()[1];
int y_width = Y->dims()[1];
int y_half_width = (y_width - 1) / 2;
const int x_per_block = 256;
int num_x_blocks = DivUp(x_width, x_per_block);
int mem_per_block = (x_per_block + 2 * y_width) * sizeof(T);
dim3 grid_dim(num_x_blocks, batch_size);
auto stream = context.template device_context<phi::GPUContext>().stream();
ConvShiftForward<T><<<grid_dim, x_per_block, mem_per_block, stream>>>(
x_data, y_data, x_width, y_width, y_half_width, batch_size, out_data);
}
};
template <typename T>
class ConvShiftGradKernel<T, phi::GPUContext> : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const phi::DenseTensor *X = context.Input<phi::DenseTensor>("X");
const phi::DenseTensor *Y = context.Input<phi::DenseTensor>("Y");
const phi::DenseTensor *dOut =
context.Input<phi::DenseTensor>(framework::GradVarName("Out"));
const T *x_data = X->data<T>();
const T *y_data = Y->data<T>();
const T *dout_data = dOut->data<T>();
phi::DenseTensor *dX =
context.Output<phi::DenseTensor>(framework::GradVarName("X"));
phi::DenseTensor *dY =
context.Output<phi::DenseTensor>(framework::GradVarName("Y"));
int batch_size = X->dims()[0];
int x_width = X->dims()[1];
int y_width = Y->dims()[1];
int y_half_width = (y_width - 1) / 2;
auto &device_ctx = context.template device_context<phi::GPUContext>();
phi::funcs::SetConstant<phi::GPUContext, T> zero;
const int x_per_block = 256;
int num_x_blocks = DivUp(x_width, x_per_block);
dim3 grid_dim(num_x_blocks, y_width, batch_size);
if (dX) {
T *dx_data = dX->mutable_data<T>(context.GetPlace());
zero(device_ctx, dX, static_cast<T>(0.0));
ConvShiftGradX<T>
<<<grid_dim, x_per_block, 0, device_ctx.stream()>>>(dout_data,
y_data,
x_width,
y_width,
y_half_width,
batch_size,
dx_data);
}
if (dY) {
T *dy_data = dY->mutable_data<T>(context.GetPlace());
zero(device_ctx, dY, static_cast<T>(0.0));
ConvShiftDy<T>
<<<grid_dim, x_per_block, 0, device_ctx.stream()>>>(x_data,
dout_data,
x_width,
y_width,
y_half_width,
batch_size,
dy_data);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(
conv_shift, GPU, ALL_LAYOUT, ops::ConvShiftKernel, float) {}
PD_REGISTER_STRUCT_KERNEL(
conv_shift_grad, GPU, ALL_LAYOUT, ops::ConvShiftGradKernel, float) {}
|
ee1d86694ec3719934c676b1060a09b4edffa0f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace split_merge
{
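// TypeTraits maps an element type to the built-in vector types of the same
// element size (e.g. short2/short3/short4), so merge/split can move a whole
// multi-channel pixel with a single vector load/store where such a type exists.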
template <typename T, size_t elem_size = sizeof(T)>
struct TypeTraits
{
typedef T type;
typedef T type2;
typedef T type3;
typedef T type4;
};
template <typename T>
struct TypeTraits<T, 1>
{
typedef char type;
typedef char2 type2;
typedef char3 type3;
typedef char4 type4;
};
template <typename T>
struct TypeTraits<T, 2>
{
typedef short type;
typedef short2 type2;
typedef short3 type3;
typedef short4 type4;
};
template <typename T>
struct TypeTraits<T, 4>
{
typedef int type;
typedef int2 type2;
typedef int3 type3;
typedef int4 type4;
};
template <typename T>
struct TypeTraits<T, 8>
{
typedef double type;
typedef double2 type2;
//typedef double3 type3;
//typedef double4 type3;
};
typedef void (*MergeFunction)(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream);
typedef void (*SplitFunction)(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream);
//------------------------------------------------------------
// Merge
template <typename T>
__global__ void mergeC2_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type2 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_y[x] = dst_elem;
}
}
template <typename T>
__global__ void mergeC3_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type3 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_y[x] = dst_elem;
}
}
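// TypeTraits<double> defines no double3/double4 (commented out above), so the
// double specializations below write the channels as plain doubles or double2 pairs.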
template <>
__global__ void mergeC3_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
double* dst_y = (double*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[3 * x] = src0_y[x];
dst_y[3 * x + 1] = src1_y[x];
dst_y[3 * x + 2] = src2_y[x];
}
}
template <typename T>
__global__ void mergeC4_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type4 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
const T* src3_y = (const T*)(src3 + y * src3_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_elem.w = src3_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC4_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
const double* src3_y = (const double*)(src3 + y * src3_step);
double2* dst_y = (double2*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[2 * x] = make_double2(src0_y[x], src1_y[x]);
dst_y[2 * x + 1] = make_double2(src2_y[x], src3_y[x]);
}
}
template <typename T>
static void mergeC2_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC2_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void mergeC3_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC3_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void mergeC4_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC4_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
src[3].data, src[3].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
void merge(const PtrStepSzb* src, PtrStepSzb& dst,
int total_channels, size_t elem_size,
const hipStream_t& stream)
{
static MergeFunction merge_func_tbl[] =
{
mergeC2_<char>, mergeC2_<short>, mergeC2_<int>, 0, mergeC2_<double>,
mergeC3_<char>, mergeC3_<short>, mergeC3_<int>, 0, mergeC3_<double>,
mergeC4_<char>, mergeC4_<short>, mergeC4_<int>, 0, mergeC4_<double>,
};
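// Rows index the channel count (2..4); columns index elem_size 1,2,4,8 via
// elem_size >> 1 -> 0,1,2,4 (slot 3 is an unused hole, left as 0).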
size_t merge_func_id = (total_channels - 2) * 5 + (elem_size >> 1);
MergeFunction merge_func = merge_func_tbl[merge_func_id];
if (merge_func == 0)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported channel count or data type");
merge_func(src, dst, stream);
}
//------------------------------------------------------------
// Split
template <typename T>
__global__ void splitC2_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step)
{
typedef typename TypeTraits<T>::type2 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
}
}
template <typename T>
__global__ void splitC3_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
typedef typename TypeTraits<T>::type3 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
}
}
template <>
__global__ void splitC3_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src_y = (const double*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
dst0_y[x] = src_y[3 * x];
dst1_y[x] = src_y[3 * x + 1];
dst2_y[x] = src_y[3 * x + 2];
}
}
template <typename T>
__global__ void splitC4_(const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
typedef typename TypeTraits<T>::type4 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
T* dst3_y = (T*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
dst3_y[x] = src_elem.w;
}
}
template <>
__global__ void splitC4_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double2* src_y = (const double2*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
double* dst3_y = (double*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
double2 src_elem1 = src_y[2 * x];
double2 src_elem2 = src_y[2 * x + 1];
dst0_y[x] = src_elem1.x;
dst1_y[x] = src_elem1.y;
dst2_y[x] = src_elem2.x;
dst3_y[x] = src_elem2.y;
}
}
template <typename T>
static void splitC2_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC2_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void splitC3_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC3_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void splitC4_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC4_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step,
dst[3].data, dst[3].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
void split(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const hipStream_t& stream)
{
static SplitFunction split_func_tbl[] =
{
splitC2_<char>, splitC2_<short>, splitC2_<int>, 0, splitC2_<double>,
splitC3_<char>, splitC3_<short>, splitC3_<int>, 0, splitC3_<double>,
splitC4_<char>, splitC4_<short>, splitC4_<int>, 0, splitC4_<double>,
};
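// Same table layout as merge: row = num_channels - 2, column = elem_size1 >> 1.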
size_t split_func_id = (num_channels - 2) * 5 + (elem_size1 >> 1);
SplitFunction split_func = split_func_tbl[split_func_id];
if (split_func == 0)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported channel count or data type");
split_func(src, dst, stream);
}
} // namespace split_merge
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
| ee1d86694ec3719934c676b1060a09b4edffa0f5.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace split_merge
{
template <typename T, size_t elem_size = sizeof(T)>
struct TypeTraits
{
typedef T type;
typedef T type2;
typedef T type3;
typedef T type4;
};
template <typename T>
struct TypeTraits<T, 1>
{
typedef char type;
typedef char2 type2;
typedef char3 type3;
typedef char4 type4;
};
template <typename T>
struct TypeTraits<T, 2>
{
typedef short type;
typedef short2 type2;
typedef short3 type3;
typedef short4 type4;
};
template <typename T>
struct TypeTraits<T, 4>
{
typedef int type;
typedef int2 type2;
typedef int3 type3;
typedef int4 type4;
};
template <typename T>
struct TypeTraits<T, 8>
{
typedef double type;
typedef double2 type2;
//typedef double3 type3;
//typedef double4 type3;
};
typedef void (*MergeFunction)(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream);
typedef void (*SplitFunction)(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream);
//------------------------------------------------------------
// Merge
template <typename T>
__global__ void mergeC2_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type2 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_y[x] = dst_elem;
}
}
template <typename T>
__global__ void mergeC3_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type3 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC3_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
double* dst_y = (double*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[3 * x] = src0_y[x];
dst_y[3 * x + 1] = src1_y[x];
dst_y[3 * x + 2] = src2_y[x];
}
}
template <typename T>
__global__ void mergeC4_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type4 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
const T* src3_y = (const T*)(src3 + y * src3_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_elem.w = src3_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC4_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
const double* src3_y = (const double*)(src3 + y * src3_step);
double2* dst_y = (double2*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[2 * x] = make_double2(src0_y[x], src1_y[x]);
dst_y[2 * x + 1] = make_double2(src2_y[x], src3_y[x]);
}
}
template <typename T>
static void mergeC2_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC2_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void mergeC3_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC3_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void mergeC4_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC4_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
src[3].data, src[3].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void merge(const PtrStepSzb* src, PtrStepSzb& dst,
int total_channels, size_t elem_size,
const cudaStream_t& stream)
{
static MergeFunction merge_func_tbl[] =
{
mergeC2_<char>, mergeC2_<short>, mergeC2_<int>, 0, mergeC2_<double>,
mergeC3_<char>, mergeC3_<short>, mergeC3_<int>, 0, mergeC3_<double>,
mergeC4_<char>, mergeC4_<short>, mergeC4_<int>, 0, mergeC4_<double>,
};
size_t merge_func_id = (total_channels - 2) * 5 + (elem_size >> 1);
MergeFunction merge_func = merge_func_tbl[merge_func_id];
if (merge_func == 0)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported channel count or data type");
merge_func(src, dst, stream);
}
//------------------------------------------------------------
// Split
template <typename T>
__global__ void splitC2_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step)
{
typedef typename TypeTraits<T>::type2 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
}
}
template <typename T>
__global__ void splitC3_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
typedef typename TypeTraits<T>::type3 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
}
}
template <>
__global__ void splitC3_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src_y = (const double*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
dst0_y[x] = src_y[3 * x];
dst1_y[x] = src_y[3 * x + 1];
dst2_y[x] = src_y[3 * x + 2];
}
}
template <typename T>
__global__ void splitC4_(const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
typedef typename TypeTraits<T>::type4 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
T* dst3_y = (T*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
dst3_y[x] = src_elem.w;
}
}
template <>
__global__ void splitC4_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double2* src_y = (const double2*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
double* dst3_y = (double*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
double2 src_elem1 = src_y[2 * x];
double2 src_elem2 = src_y[2 * x + 1];
dst0_y[x] = src_elem1.x;
dst1_y[x] = src_elem1.y;
dst2_y[x] = src_elem2.x;
dst3_y[x] = src_elem2.y;
}
}
template <typename T>
static void splitC2_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC2_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void splitC3_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC3_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void splitC4_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC4_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step,
dst[3].data, dst[3].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void split(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const cudaStream_t& stream)
{
static SplitFunction split_func_tbl[] =
{
splitC2_<char>, splitC2_<short>, splitC2_<int>, 0, splitC2_<double>,
splitC3_<char>, splitC3_<short>, splitC3_<int>, 0, splitC3_<double>,
splitC4_<char>, splitC4_<short>, splitC4_<int>, 0, splitC4_<double>,
};
size_t split_func_id = (num_channels - 2) * 5 + (elem_size1 >> 1);
SplitFunction split_func = split_func_tbl[split_func_id];
if (split_func == 0)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported channel count or data type");
split_func(src, dst, stream);
}
} // namespace split_merge
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
|
b5cc64ae35c0bae958b19fcd6eb78e850ab82ab2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <chrono>
#include <roctracer/roctx.h>
#include <argparse/argparse.hpp>
#include "common.hpp"
#define TILE_SZ_A 64
#define TILE_SZ_B 16
#define TILE_SZ_RATIO (TILE_SZ_A / TILE_SZ_B)
/* NOTE: A and C are column major, B is row major
*/
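// Register + shared-memory tiling: each block runs TILE_SZ_A threads, one per
// row of C; each thread accumulates TILE_SZ_B output columns in registers, and
// the block stages a TILE_SZ_RATIO x TILE_SZ_B tile of B in shared memory for
// every step over K.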
__global__ void mygemm(float *__restrict__ c, //<! [out] an MxN matrix
                       const float *a,        //<! [in] an MxK matrix
                       const float *b,        //<! [in] a KxN matrix
const int M, const int N, const int K) {
// Macros for accessing flattened matrices
#define A(_i, _j) a[(_i) + (_j)*M]
#define B(_i, _j) b[(_i)*N + (_j)]
#define C(_i, _j) c[(_i) + (_j)*M]
// Shared memory for tiling input B array
__shared__ float B_s[TILE_SZ_RATIO][TILE_SZ_B];
// Index variables
const unsigned int row = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned int col = blockIdx.y * TILE_SZ_B;
// Privatization of output variables
float c_reg[TILE_SZ_B];
// Initialize output values
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
c_reg[outIdx] = 0;
}
// Loop over the input tiles
for (unsigned int tileIdx = 0; tileIdx < (K - 1) / TILE_SZ_RATIO + 1;
++tileIdx) {
// Load the tile of B into shared memory
const unsigned int i = threadIdx.x / TILE_SZ_B;
const unsigned int j = threadIdx.x % TILE_SZ_B;
if (tileIdx * TILE_SZ_RATIO + i < K && col + j < N) {
B_s[i][j] = B(tileIdx * TILE_SZ_RATIO + i, col + j);
} else {
B_s[i][j] = 0;
}
__syncthreads();
// Loop over elements inside the tile
for (unsigned int idx = 0; idx < TILE_SZ_RATIO; ++idx) {
// Load tile of A matrix into register
float a_reg;
if (row < M && tileIdx * TILE_SZ_RATIO + idx < K) {
a_reg = A(row, tileIdx * TILE_SZ_RATIO + idx);
} else {
a_reg = 0;
}
// Loop over and update the output elements assigned to the thread
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
c_reg[outIdx] += a_reg * B_s[idx][outIdx];
}
}
__syncthreads();
}
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
if (row < M && col + outIdx < N) {
C(row, col + outIdx) = c_reg[outIdx];
}
}
#undef A
#undef B
#undef C
}
int main(int argc, char **argv) {
argparse::Parser parser;
// default matrix sizes:
// A: 1600 x 1500
// B: 1500 x 1400
// C: 1600 x 1400
int m = 1600;
int n = 1400;
int k = 1500;
int nIters = 10;
int nWarmup = 5;
parser.add_positional(m);
parser.add_positional(n);
parser.add_positional(k);
parser.add_option(nIters, "--iters");
parser.add_option(nWarmup, "--warmup");
if (!parser.parse(argc, argv)) {
parser.help();
exit(EXIT_FAILURE);
}
// 4 muls of m/2, n/2, k
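// each half-sized product is (m/2)*(n/2)*k multiply-adds, i.e. 2*(m/2)*(n/2)*k flops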
const int64_t flop = int64_t(m) / 2 * int64_t(n) / 2 * int64_t(k) * 2 * 4 * nIters;
// initialize host data
std::cout << "generate data\n";
roctxRangePush("generate data");
float *aHost[2], *bHost[2], *cHost[2][2];
CUDA_RUNTIME(hipHostMalloc(&aHost[0], m / 2 * k * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&aHost[1], m / 2 * k * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&bHost[0], k * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&bHost[1], k * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost[0][0], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost[0][1], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost[1][0], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost[1][1], m / 2 * n / 2 * sizeof(float), 0));
std::generate(aHost[0], aHost[0] + m / 2 * k, random_int);
std::generate(aHost[1], aHost[1] + m / 2 * k, random_int);
std::generate(bHost[0], bHost[0] + k * n / 2, random_int);
std::generate(bHost[1], bHost[1] + k * n / 2, random_int);
roctxRangePop();
// allocate device data
std::cout << "allocate data\n";
float *aDev[2], *bDev[2], *cDev[2][2];
CUDA_RUNTIME(hipMalloc(&aDev[0], m / 2 * k * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&aDev[1], m / 2 * k * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&bDev[0], k * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&bDev[1], k * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev[0][0], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev[0][1], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev[1][0], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev[1][1], m / 2 * n / 2 * sizeof(float)));
// create streams for copy and kernels
hipStream_t copyStream, kernelStream;
CUDA_RUNTIME(hipStreamCreate(©Stream));
CUDA_RUNTIME(hipStreamCreate(&kernelStream));
hipEvent_t waitForA0B0, waitForA1, waitForB1, waitC[2][2];
CUDA_RUNTIME(hipEventCreate(&waitForA0B0));
CUDA_RUNTIME(hipEventCreate(&waitForA1));
CUDA_RUNTIME(hipEventCreate(&waitForB1));
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
CUDA_RUNTIME(hipEventCreate(&waitC[i][j]));
}
}
// GPU kernel launch parameters
dim3 dimGrid((m/2 + TILE_SZ_A - 1) / TILE_SZ_A, (n/2 +TILE_SZ_B - 1) / TILE_SZ_B);
dim3 dimBlock(TILE_SZ_A, 1);
float kernelTime = 0;
float wallTime = 0;
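// Per iteration: stage H2D copies on copyStream, launch each quadrant GEMM on
// kernelStream once its inputs have arrived (synchronized via events), then copy
// each C quadrant back as soon as its kernel has finished.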
for (int iter = 0; iter < nIters + nWarmup; ++iter) {
roctxRangePush("wall time");
auto wallStart = Clock::now();
// copy a0 and b0
CUDA_RUNTIME(hipMemcpyAsync(aDev[0], aHost[0], m / 2 * k * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipMemcpyAsync(bDev[0], bHost[0], k * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipEventRecord(waitForA0B0, copyStream));
// have the kernelStream wait for the transfers to complete
CUDA_RUNTIME(hipStreamWaitEvent(kernelStream, waitForA0B0, 0));
// launch c[0][0] = a[0] * b[0]
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, kernelStream, cDev[0][0], aDev[0], bDev[0],
m / 2, n / 2, k);
CUDA_RUNTIME(hipEventRecord(waitC[0][0], kernelStream));
// copy a1
CUDA_RUNTIME(hipMemcpyAsync(aDev[1], aHost[1], m / 2 * k * sizeof(float),
hipMemcpyDefault, copyStream));
// record on copyStream so the kernel stream actually waits for the a[1] copy
CUDA_RUNTIME(hipEventRecord(waitForA1, copyStream));
// launch c[1][0] = a[1] * b[0] after a[1] is on the GPU
CUDA_RUNTIME(hipStreamWaitEvent(kernelStream, waitForA1, 0));
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, kernelStream, cDev[1][0], aDev[1], bDev[0],
m / 2, n / 2, k);
CUDA_RUNTIME(hipEventRecord(waitC[1][0], kernelStream));
// copy b1
CUDA_RUNTIME(hipMemcpyAsync(bDev[1], bHost[1], k * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
// record on copyStream so the kernel stream actually waits for the b[1] copy
CUDA_RUNTIME(hipEventRecord(waitForB1, copyStream));
// launch c[0][1] = a[0] * b[1] after B1 is on the GPU
CUDA_RUNTIME(hipStreamWaitEvent(kernelStream, waitForB1, 0));
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, kernelStream, cDev[0][1], aDev[0], bDev[1],
m / 2, n / 2, k);
CUDA_RUNTIME(hipEventRecord(waitC[0][1], kernelStream));
// launch c[1][1] = a[1] * b[1]
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, kernelStream, cDev[1][1], aDev[1], bDev[1],
m / 2, n / 2, k);
CUDA_RUNTIME(hipEventRecord(waitC[1][1], kernelStream));
// copy c back to CPU as kernels finish
CUDA_RUNTIME(hipStreamWaitEvent(copyStream, waitC[0][0], 0));
CUDA_RUNTIME(hipMemcpyAsync(cHost[0][0], cDev[0][0],
m / 2 * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipStreamWaitEvent(copyStream, waitC[1][0], 0));
CUDA_RUNTIME(hipMemcpyAsync(cHost[1][0], cDev[1][0],
m / 2 * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipStreamWaitEvent(copyStream, waitC[0][1], 0));
CUDA_RUNTIME(hipMemcpyAsync(cHost[0][1], cDev[0][1],
m / 2 * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipStreamWaitEvent(copyStream, waitC[1][1], 0));
CUDA_RUNTIME(hipMemcpyAsync(cHost[1][1], cDev[1][1],
m / 2 * n / 2 * sizeof(float),
hipMemcpyDefault, copyStream));
CUDA_RUNTIME(hipDeviceSynchronize());
roctxRangePop(); // wall time
Duration wallElapsed = Clock::now() - wallStart;
// kernel time
float kernelElapsed;
CUDA_RUNTIME(hipEventElapsedTime(&kernelElapsed, waitForA0B0, waitC[1][1]));
kernelElapsed /= 1000; // seconds
std::cout << iter << " kernel=" << kernelElapsed
<< " wall=" << wallElapsed.count()
<< (iter >= nWarmup ? " *" : " ") << "\n";
if (iter >= nWarmup) {
wallTime += wallElapsed.count();
kernelTime += kernelElapsed;
}
}
// print results
double kernelGflops = flop / 1e9 / kernelTime;
std::cout << "kernel " << kernelGflops << "GFLOPS (" << flop << " flop, "
<< kernelTime << "s)\n";
double wallGflops = flop / 1e9 / wallTime;
std::cout << "wall " << wallGflops << "GFLOPS (" << flop << " flop, "
<< wallTime << "s)\n";
// release resources
CUDA_RUNTIME(hipFree(aDev[0]));
CUDA_RUNTIME(hipFree(aDev[1]));
CUDA_RUNTIME(hipFree(bDev[0]));
CUDA_RUNTIME(hipFree(bDev[1]));
return 0;
}
| b5cc64ae35c0bae958b19fcd6eb78e850ab82ab2.cu | #include <algorithm>
#include <chrono>
#include <nvToolsExt.h>
#include <argparse/argparse.hpp>
#include "common.hpp"
#define TILE_SZ_A 64
#define TILE_SZ_B 16
#define TILE_SZ_RATIO (TILE_SZ_A / TILE_SZ_B)
/* NOTE: A and C are column major, B is row major
*/
__global__ void mygemm(float *__restrict__ c, //<! [out] an MxN matrix
                       const float *a,        //<! [in] an MxK matrix
                       const float *b,        //<! [in] a KxN matrix
const int M, const int N, const int K) {
// Macros for accessing flattened matrices
#define A(_i, _j) a[(_i) + (_j)*M]
#define B(_i, _j) b[(_i)*N + (_j)]
#define C(_i, _j) c[(_i) + (_j)*M]
// Shared memory for tiling input B array
__shared__ float B_s[TILE_SZ_RATIO][TILE_SZ_B];
// Index variables
const unsigned int row = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned int col = blockIdx.y * TILE_SZ_B;
// Privatization of output variables
float c_reg[TILE_SZ_B];
// Initialize output values
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
c_reg[outIdx] = 0;
}
// Loop over the input tiles
for (unsigned int tileIdx = 0; tileIdx < (K - 1) / TILE_SZ_RATIO + 1;
++tileIdx) {
// Load the tile of B into shared memory
const unsigned int i = threadIdx.x / TILE_SZ_B;
const unsigned int j = threadIdx.x % TILE_SZ_B;
if (tileIdx * TILE_SZ_RATIO + i < K && col + j < N) {
B_s[i][j] = B(tileIdx * TILE_SZ_RATIO + i, col + j);
} else {
B_s[i][j] = 0;
}
__syncthreads();
// Loop over elements inside the tile
for (unsigned int idx = 0; idx < TILE_SZ_RATIO; ++idx) {
// Load tile of A matrix into register
float a_reg;
if (row < M && tileIdx * TILE_SZ_RATIO + idx < K) {
a_reg = A(row, tileIdx * TILE_SZ_RATIO + idx);
} else {
a_reg = 0;
}
// Loop over and update the output elements assigned to the thread
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
c_reg[outIdx] += a_reg * B_s[idx][outIdx];
}
}
__syncthreads();
}
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
if (row < M && col + outIdx < N) {
C(row, col + outIdx) = c_reg[outIdx];
}
}
#undef A
#undef B
#undef C
}
int main(int argc, char **argv) {
argparse::Parser parser;
// default matrix sizes:
// A: 1600 x 1500
// B: 1500 x 1400
// C: 1600 x 1400
int m = 1600;
int n = 1400;
int k = 1500;
int nIters = 10;
int nWarmup = 5;
parser.add_positional(m);
parser.add_positional(n);
parser.add_positional(k);
parser.add_option(nIters, "--iters");
parser.add_option(nWarmup, "--warmup");
if (!parser.parse(argc, argv)) {
parser.help();
exit(EXIT_FAILURE);
}
// 4 muls of m/2, n/2, k
const int64_t flop = int64_t(m) / 2 * int64_t(n) / 2 * int64_t(k) * 2 * 4 * nIters;
// initialize host data
std::cout << "generate data\n";
nvtxRangePush("generate data");
float *aHost[2], *bHost[2], *cHost[2][2];
CUDA_RUNTIME(cudaHostAlloc(&aHost[0], m / 2 * k * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&aHost[1], m / 2 * k * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&bHost[0], k * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&bHost[1], k * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost[0][0], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost[0][1], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost[1][0], m / 2 * n / 2 * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost[1][1], m / 2 * n / 2 * sizeof(float), 0));
std::generate(aHost[0], aHost[0] + m / 2 * k, random_int);
std::generate(aHost[1], aHost[1] + m / 2 * k, random_int);
std::generate(bHost[0], bHost[0] + k * n / 2, random_int);
std::generate(bHost[1], bHost[1] + k * n / 2, random_int);
nvtxRangePop();
// allocate device data
std::cout << "allocate data\n";
float *aDev[2], *bDev[2], *cDev[2][2];
CUDA_RUNTIME(cudaMalloc(&aDev[0], m / 2 * k * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&aDev[1], m / 2 * k * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&bDev[0], k * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&bDev[1], k * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev[0][0], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev[0][1], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev[1][0], m / 2 * n / 2 * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev[1][1], m / 2 * n / 2 * sizeof(float)));
// create streams for copy and kernels
cudaStream_t copyStream, kernelStream;
CUDA_RUNTIME(cudaStreamCreate(©Stream));
CUDA_RUNTIME(cudaStreamCreate(&kernelStream));
cudaEvent_t waitForA0B0, waitForA1, waitForB1, waitC[2][2];
CUDA_RUNTIME(cudaEventCreate(&waitForA0B0));
CUDA_RUNTIME(cudaEventCreate(&waitForA1));
CUDA_RUNTIME(cudaEventCreate(&waitForB1));
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
CUDA_RUNTIME(cudaEventCreate(&waitC[i][j]));
}
}
// GPU kernel launch parameters
dim3 dimGrid((m/2 + TILE_SZ_A - 1) / TILE_SZ_A, (n/2 +TILE_SZ_B - 1) / TILE_SZ_B);
dim3 dimBlock(TILE_SZ_A, 1);
float kernelTime = 0;
float wallTime = 0;
for (int iter = 0; iter < nIters + nWarmup; ++iter) {
nvtxRangePush("wall time");
auto wallStart = Clock::now();
// copy a0 and b0
CUDA_RUNTIME(cudaMemcpyAsync(aDev[0], aHost[0], m / 2 * k * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaMemcpyAsync(bDev[0], bHost[0], k * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaEventRecord(waitForA0B0, copyStream));
// have the kernelStream wait for the transfers to complete
CUDA_RUNTIME(cudaStreamWaitEvent(kernelStream, waitForA0B0, 0));
// launch c[0][0] = a[0] * b[0]
mygemm<<<dimGrid, dimBlock, 0, kernelStream>>>(cDev[0][0], aDev[0], bDev[0],
m / 2, n / 2, k);
CUDA_RUNTIME(cudaEventRecord(waitC[0][0], kernelStream));
// copy a1
CUDA_RUNTIME(cudaMemcpyAsync(aDev[1], aHost[1], m / 2 * k * sizeof(float),
cudaMemcpyDefault, copyStream));
// record on copyStream so the kernel stream actually waits for the a[1] copy
CUDA_RUNTIME(cudaEventRecord(waitForA1, copyStream));
// launch c[1][0] = a[1] * b[0] after a[1] is on the GPU
CUDA_RUNTIME(cudaStreamWaitEvent(kernelStream, waitForA1, 0));
mygemm<<<dimGrid, dimBlock, 0, kernelStream>>>(cDev[1][0], aDev[1], bDev[0],
m / 2, n / 2, k);
CUDA_RUNTIME(cudaEventRecord(waitC[1][0], kernelStream));
// copy b1
CUDA_RUNTIME(cudaMemcpyAsync(bDev[1], bHost[1], k * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
// record on copyStream so the kernel stream actually waits for the b[1] copy
CUDA_RUNTIME(cudaEventRecord(waitForB1, copyStream));
// launch c[0][1] = a[0] * b[1] after B1 is on the GPU
CUDA_RUNTIME(cudaStreamWaitEvent(kernelStream, waitForB1, 0));
mygemm<<<dimGrid, dimBlock, 0, kernelStream>>>(cDev[0][1], aDev[0], bDev[1],
m / 2, n / 2, k);
CUDA_RUNTIME(cudaEventRecord(waitC[0][1], kernelStream));
// launch c[1][1] = a[1] * b[1]
mygemm<<<dimGrid, dimBlock, 0, kernelStream>>>(cDev[1][1], aDev[1], bDev[1],
m / 2, n / 2, k);
CUDA_RUNTIME(cudaEventRecord(waitC[1][1], kernelStream));
// copy c back to CPU as kernels finish
CUDA_RUNTIME(cudaStreamWaitEvent(copyStream, waitC[0][0], 0));
CUDA_RUNTIME(cudaMemcpyAsync(cHost[0][0], cDev[0][0],
m / 2 * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaStreamWaitEvent(copyStream, waitC[1][0], 0));
CUDA_RUNTIME(cudaMemcpyAsync(cHost[1][0], cDev[1][0],
m / 2 * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaStreamWaitEvent(copyStream, waitC[0][1], 0));
CUDA_RUNTIME(cudaMemcpyAsync(cHost[0][1], cDev[0][1],
m / 2 * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaStreamWaitEvent(copyStream, waitC[1][1], 0));
CUDA_RUNTIME(cudaMemcpyAsync(cHost[1][1], cDev[1][1],
m / 2 * n / 2 * sizeof(float),
cudaMemcpyDefault, copyStream));
CUDA_RUNTIME(cudaDeviceSynchronize());
nvtxRangePop(); // wall time
Duration wallElapsed = Clock::now() - wallStart;
// kernel time
float kernelElapsed;
CUDA_RUNTIME(cudaEventElapsedTime(&kernelElapsed, waitForA0B0, waitC[1][1]));
kernelElapsed /= 1000; // seconds
std::cout << iter << " kernel=" << kernelElapsed
<< " wall=" << wallElapsed.count()
<< (iter >= nWarmup ? " *" : " ") << "\n";
if (iter >= nWarmup) {
wallTime += wallElapsed.count();
kernelTime += kernelElapsed;
}
}
// print results
double kernelGflops = flop / 1e9 / kernelTime;
std::cout << "kernel " << kernelGflops << "GFLOPS (" << flop << " flop, "
<< kernelTime << "s)\n";
double wallGflops = flop / 1e9 / wallTime;
std::cout << "wall " << wallGflops << "GFLOPS (" << flop << " flop, "
<< wallTime << "s)\n";
// release resources
CUDA_RUNTIME(cudaFree(aDev[0]));
CUDA_RUNTIME(cudaFree(aDev[1]));
CUDA_RUNTIME(cudaFree(bDev[0]));
CUDA_RUNTIME(cudaFree(bDev[1]));
return 0;
}
|
cd2092ef064555f375040881979d5ce8dbb5b7c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include "cutil_inline.h"
#include "Mandelbrot_kernel.h"
#include "Mandelbrot_kernel.cu"
// The Mandelbrot CUDA GPU thread function
/*
Version using software scheduling of thread blocks.
The idea here is to launch a fixed number of worker blocks to fill the
machine, and have each block loop over the available work until it is all done.
We use a counter in global memory to keep track of which blocks have been
completed. The counter is incremented atomically by each worker block.
This method can achieve higher performance when blocks take a wide range of
different times to complete.
*/
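/*
 A stripped-down illustration of the pattern used by the kernels below (not compiled as-is):

   __shared__ unsigned int myBlock;
   while (true) {
       if (threadIdx.x == 0 && threadIdx.y == 0)
           myBlock = atomicAdd(&blockCounter, 1);   // claim the next logical block
       __syncthreads();
       if (myBlock >= numBlocks) break;             // all work has been handed out
       // ...process the tile addressed by myBlock...
   }
*/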
__device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch
template<class T>
__global__ void Mandelbrot0_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
__syncthreads();
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot<T>(xPos, yPos, xJP, yJP, crunch, isJ);
// int m = blockIdx.x; // uncomment to see scheduling order
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0) {
color.w = 0;
dst[pixel] = color;
} else {
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot0
// The Mandelbrot CUDA GPU thread function (double single version)
__global__ void MandelbrotDS0_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1,
const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale,
const uchar4 colors, const int frame, const int animationFrame, const int gridWidth,
const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
// printf("add %i %i \n", blockIdx.x, blockIdx.y) ;
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
__syncthreads();
if (blockIndex >= numBlocks) break; // finish
// printf("run %i %i \n", blockX, blockY ) ;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0) {
color.w = 0;
dst[pixel] = color;
} else {
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS0
// The Mandelbrot secondary AA pass CUDA GPU thread function
template<class T>
__global__ void Mandelbrot1_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
__syncthreads();
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
count += CheckColors(pixelColor, dst[pixel - 1]);
if (ix + 1 < imageW)
count += CheckColors(pixelColor, dst[pixel + 1]);
if (iy > 0)
count += CheckColors(pixelColor, dst[pixel - imageW]);
if (iy + 1 < imageH)
count += CheckColors(pixelColor, dst[pixel + imageW]);
if (count) {
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot(xPos, yPos, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot1
// The Mandelbrot secondary AA pass CUDA GPU thread function (double single version)
__global__ void MandelbrotDS1_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch,
const float xOff0, const float xOff1, const float yOff0, const float yOff1,
const float xJP, const float yJP, const float scale, const uchar4 colors,
const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
__syncthreads();
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
count += CheckColors(pixelColor, dst[pixel - 1]);
if (ix + 1 < imageW)
count += CheckColors(pixelColor, dst[pixel + 1]);
if (iy > 0)
count += CheckColors(pixelColor, dst[pixel - imageW]);
if (iy + 1 < imageH)
count += CheckColors(pixelColor, dst[pixel + imageW]);
if (count) {
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS1
// The host CPU Mandelbrot thread spawner
void RunMandelbrot0_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
// zero block counter
unsigned int hBlockCounter = 0;
cutilSafeCall( hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice ) );
int numWorkerBlocks = numSMs;
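// One persistent worker block per SM; each loops over the grid.x*grid.y logical
// blocks by atomically advancing blockCounter.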
switch(mode) {
default:
case 0:
hipLaunchKernelGGL(( Mandelbrot0_sm11<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
hipLaunchKernelGGL(( MandelbrotDS0_sm11), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 2:
hipLaunchKernelGGL(( Mandelbrot0_sm11<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
}
cutilCheckMsg("Mandelbrot0_sm11 kernel execution failed.\n");
} // RunMandelbrot0
// The host CPU Mandelbrot thread spawner
void RunMandelbrot1_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
// zero block counter
unsigned int hBlockCounter = 0;
cutilSafeCall( hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice ) );
int numWorkerBlocks = numSMs;
switch(mode) {
default:
case 0:
hipLaunchKernelGGL(( Mandelbrot1_sm11<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
hipLaunchKernelGGL(( MandelbrotDS1_sm11), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 2:
hipLaunchKernelGGL(( Mandelbrot1_sm11<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
}
cutilCheckMsg("Mandelbrot1_sm11 kernel execution failed.\n");
} // RunMandelbrot1
| cd2092ef064555f375040881979d5ce8dbb5b7c9.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include "cutil_inline.h"
#include "Mandelbrot_kernel.h"
#include "Mandelbrot_kernel.cu"
// The Mandelbrot CUDA GPU thread function
/*
Version using software scheduling of thread blocks.
The idea here is to launch a fixed number of worker blocks to fill the
machine, and have each block loop over the available work until it is all done.
We use a counter in global memory to keep track of which blocks have been
completed. The counter is incremented atomically by each worker block.
This method can achieve higher performance when blocks take a wide range of
different times to complete.
*/
__device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch
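// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original sample): the persistent-block
// scheduling pattern described above, stripped of the Mandelbrot math. The
// names demoWorkCounter / demoPersistentWorker are hypothetical; the kernels
// below use the same structure. The host zeroes the counter and launches
// roughly one block per SM, e.g.
//   demoPersistentWorker<<<numSMs, 256>>>(out, numTiles);
__device__ unsigned int demoWorkCounter;
__global__ void demoPersistentWorker(int *out, int numTiles)
{
    __shared__ unsigned int tile;                    // tile index shared by the whole block
    while (true) {
        if (threadIdx.x == 0)
            tile = atomicAdd(&demoWorkCounter, 1);   // claim the next unprocessed tile
        __syncthreads();
        if (tile >= numTiles) break;                 // all tiles claimed -- this block is done
        if (threadIdx.x == 0)
            out[tile] = blockIdx.x;                  // record which worker block handled it
        __syncthreads();                             // keep 'tile' stable for the next pass
    }
}
// ---------------------------------------------------------------------------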
template<class T>
__global__ void Mandelbrot0_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
__syncthreads();
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot<T>(xPos, yPos, xJP, yJP, crunch, isJ);
// int m = blockIdx.x; // uncomment to see scheduling order
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0) {
color.w = 0;
dst[pixel] = color;
} else {
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot0
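// ---------------------------------------------------------------------------
// Illustrative note (not part of the original sample): the progressive blend
// used above, dst = (dst * frame + color + frame2) / frame1 with
// frame1 = frame + 1 and frame2 = frame1 / 2, is an integer running mean of
// the samples accumulated so far, with frame2 providing round-to-nearest.
// A scalar sketch (demoRunningMean is a hypothetical name):
__device__ inline unsigned char demoRunningMean(unsigned char mean, unsigned char sample, int frame)
{
    int frame1 = frame + 1;   // number of samples including the new one
    int frame2 = frame1 / 2;  // rounding term for the integer division
    return (unsigned char)((mean * frame + sample + frame2) / frame1);
}
// ---------------------------------------------------------------------------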
// The Mandelbrot CUDA GPU thread function (double single version)
__global__ void MandelbrotDS0_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1,
const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale,
const uchar4 colors, const int frame, const int animationFrame, const int gridWidth,
const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
// printf("add %i %i \n", blockIdx.x, blockIdx.y) ;
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
__syncthreads();
if (blockIndex >= numBlocks) break; // finish
// printf("run %i %i \n", blockX, blockY ) ;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0) {
color.w = 0;
dst[pixel] = color;
} else {
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS0
// The Mandelbrot secondary AA pass CUDA GPU thread function
template<class T>
__global__ void Mandelbrot1_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
__syncthreads();
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
count += CheckColors(pixelColor, dst[pixel - 1]);
if (ix + 1 < imageW)
count += CheckColors(pixelColor, dst[pixel + 1]);
if (iy > 0)
count += CheckColors(pixelColor, dst[pixel - imageW]);
if (iy + 1 < imageH)
count += CheckColors(pixelColor, dst[pixel + imageW]);
if (count) {
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot(xPos, yPos, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot1
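// ---------------------------------------------------------------------------
// Illustrative sketch (assumption): CheckColors() is defined in
// Mandelbrot_kernel.cu, which is not reproduced here. The AA pass above only
// needs a predicate that flags two neighbouring pixels whose colors differ by
// more than some tolerance, along these lines (demoCheckColors and the
// tolerance value are hypothetical):
__device__ inline int demoCheckColors(const uchar4 &a, const uchar4 &b)
{
    int dx = (int)a.x - (int)b.x;
    int dy = (int)a.y - (int)b.y;
    int dz = (int)a.z - (int)b.z;
    return (abs(dx) > 10 || abs(dy) > 10 || abs(dz) > 10) ? 1 : 0;
}
// ---------------------------------------------------------------------------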
// The Mandelbrot secondary AA pass CUDA GPU thread function (double single version)
__global__ void MandelbrotDS1_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch,
const float xOff0, const float xOff1, const float yOff0, const float yOff1,
const float xJP, const float yJP, const float scale, const uchar4 colors,
const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
__syncthreads();
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH)) {
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
count += CheckColors(pixelColor, dst[pixel - 1]);
if (ix + 1 < imageW)
count += CheckColors(pixelColor, dst[pixel + 1]);
if (iy > 0)
count += CheckColors(pixelColor, dst[pixel - imageW]);
if (iy + 1 < imageH)
count += CheckColors(pixelColor, dst[pixel + imageW]);
if (count) {
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS1
// The host CPU Mandelbrot thread spawner
void RunMandelbrot0_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
// zero block counter
unsigned int hBlockCounter = 0;
cutilSafeCall( cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice ) );
int numWorkerBlocks = numSMs;
switch(mode) {
default:
case 0:
Mandelbrot0_sm11<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
MandelbrotDS0_sm11<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 2:
Mandelbrot0_sm11<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
}
cutilCheckMsg("Mandelbrot0_sm11 kernel execution failed.\n");
} // RunMandelbrot0
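// ---------------------------------------------------------------------------
// Illustrative sketch (assumption): dsdeq() is declared in Mandelbrot_kernel.h,
// which is not reproduced here. For the mode == 1 path above it splits a
// double offset into a float head plus a float tail so that x0 + x1 ~= xOff;
// the "double single" kernels then carry both parts through dsadd()/dsmul().
// A typical split looks like this (demo_ds_split is a hypothetical name):
inline void demo_ds_split(float &hi, float &lo, double d)
{
    hi = (float)d;                 // head: the closest float to d
    lo = (float)(d - (double)hi);  // tail: the part the head could not represent
}
// ---------------------------------------------------------------------------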
// The host CPU Mandelbrot thread spawner
void RunMandelbrot1_sm11(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
// zero block counter
unsigned int hBlockCounter = 0;
cutilSafeCall( cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice ) );
int numWorkerBlocks = numSMs;
switch(mode) {
default:
case 0:
Mandelbrot1_sm11<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
MandelbrotDS1_sm11<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
case 2:
Mandelbrot1_sm11<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x*grid.y, isJ);
break;
}
cutilCheckMsg("Mandelbrot1_sm11 kernel execution failed.\n");
} // RunMandelbrot1
|
7397fb5bdf894df901de0f7e1f1b919a5bed3503.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// CUDA runtime
// Helper functions and utilities to work with CUDA
//Standard C library
#define subCOL 5248
#define COL 5248
#define ROW 358
#define WARPABLEROW 512
#define blocksize 256
#define subMatDim subCOL*WARPABLEROW
#define targetMatDim ROW * COL
__global__ void reduce5(int *g_idata, int *g_odata, int g_size)
{
__shared__ int sdata[blocksize];
// each thread loads two elements from global mem, adds them during the load, and stores the sum in shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid < 32)
{
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
} | 7397fb5bdf894df901de0f7e1f1b919a5bed3503.cu | #include "includes.h"
// CUDA runtime
// Helper functions and utilities to work with CUDA
//Standard C library
#define subCOL 5248
#define COL 5248
#define ROW 358
#define WARPABLEROW 512
#define blocksize 256
#define subMatDim subCOL*WARPABLEROW
#define targetMatDim ROW * COL
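// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): how reduce5,
// defined below, would typically be launched. Each thread pre-adds two input
// elements while loading, so a block of `blocksize` threads consumes
// 2 * blocksize elements and writes one partial sum per block into d_out.
// The names d_in / d_out / n are hypothetical, and n is assumed to be a
// multiple of 2 * blocksize (the kernel has no bounds checks).
__global__ void reduce5(int *g_idata, int *g_odata, int g_size);  // forward declaration
static void launch_reduce5_once(int *d_in, int *d_out, int n)
{
    int blocks = n / (2 * blocksize);               // two elements per thread
    reduce5<<<blocks, blocksize>>>(d_in, d_out, n); // one partial sum per block lands in d_out
}
// ---------------------------------------------------------------------------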
__global__ void reduce5(int *g_idata, int *g_odata, int g_size)
{
__shared__ int sdata[blocksize];
// each thread loads two elements from global mem, adds them during the load, and stores the sum in shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
__syncthreads();
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid < 32)
{
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
} |
12209be151eeb082cc8ebe5a9a5d195da6ee349f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "types.h"
#include "gnn.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
ResourceManager* manager)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
std::set<Memory> memories;
region.get_memories(memories);
assert(memories.size() == 1);
memory = *memories.begin();
if (memory.kind() == Memory::GPU_FB_MEM) {
fbCache = NULL;
} else if (memory.kind() == Memory::Z_COPY_MEM) {
int id = manager->assign(region, rect.volume());
assert(id >= 0);
fbCache = (DT*) manager->fbCache[id].ptr;
checkCUDA(hipMemcpyAsync(fbCache, ptr, rect.volume() * sizeof(DT),
hipMemcpyHostToDevice));
} else {
assert(false);
}
}
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
ResourceManager* manager,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
std::set<Memory> memories;
region.get_memories(memories);
assert(memories.size() == 1);
memory = *memories.begin();
if (memory.kind() == Memory::GPU_FB_MEM) {
fbCache = NULL;
} else if (memory.kind() == Memory::Z_COPY_MEM) {
int id = manager->assign(region, rect.volume());
assert(id >= 0);
fbCache = (DT*) manager->fbCache[id].ptr;
if (readOutput) {
checkCUDA(hipMemcpyAsync(fbCache, ptr, rect.volume() * sizeof(DT),
hipMemcpyHostToDevice));
} else {
// Currently we zero init the fbCache if not read output
hipLaunchKernelGGL(( zero_array<DT>), dim3(GET_BLOCKS(rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
fbCache, rect.volume());
}
} else {
assert(false);
}
}
template class TensorAccessorR<NodeStruct, 1>;
template class TensorAccessorR<EdgeStruct, 1>;
template class TensorAccessorR<DATATYPE, 1>;
template class TensorAccessorR<DATATYPE, 2>;
template class TensorAccessorR<DATATYPE, 3>;
template class TensorAccessorR<int, 2>;
template class TensorAccessorW<DATATYPE, 1>;
template class TensorAccessorW<DATATYPE, 2>;
template class TensorAccessorW<DATATYPE, 3>;
| 12209be151eeb082cc8ebe5a9a5d195da6ee349f.cu | #include "types.h"
#include "gnn.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
ResourceManager* manager)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
std::set<Memory> memories;
region.get_memories(memories);
assert(memories.size() == 1);
memory = *memories.begin();
if (memory.kind() == Memory::GPU_FB_MEM) {
fbCache = NULL;
} else if (memory.kind() == Memory::Z_COPY_MEM) {
int id = manager->assign(region, rect.volume());
assert(id >= 0);
fbCache = (DT*) manager->fbCache[id].ptr;
checkCUDA(cudaMemcpyAsync(fbCache, ptr, rect.volume() * sizeof(DT),
cudaMemcpyHostToDevice));
} else {
assert(false);
}
}
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
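// ---------------------------------------------------------------------------
// Illustrative sketch (assumption): CUDA_KERNEL_LOOP, GET_BLOCKS and
// CUDA_NUM_THREADS come from cuda_helper.h, which is not reproduced here.
// CUDA_KERNEL_LOOP is conventionally a grid-stride loop and GET_BLOCKS sizes
// the launch to cover every element; written out with hypothetical names so
// it does not collide with the real definitions:
#define DEMO_NUM_THREADS 256
#define DEMO_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
inline int demo_get_blocks(int n) { return (n + DEMO_NUM_THREADS - 1) / DEMO_NUM_THREADS; }
// ---------------------------------------------------------------------------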
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
ResourceManager* manager,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
std::set<Memory> memories;
region.get_memories(memories);
assert(memories.size() == 1);
memory = *memories.begin();
if (memory.kind() == Memory::GPU_FB_MEM) {
fbCache = NULL;
} else if (memory.kind() == Memory::Z_COPY_MEM) {
int id = manager->assign(region, rect.volume());
assert(id >= 0);
fbCache = (DT*) manager->fbCache[id].ptr;
if (readOutput) {
checkCUDA(cudaMemcpyAsync(fbCache, ptr, rect.volume() * sizeof(DT),
cudaMemcpyHostToDevice));
} else {
// Currently we zero init the fbCache if not read output
zero_array<DT><<<GET_BLOCKS(rect.volume()), CUDA_NUM_THREADS>>>(
fbCache, rect.volume());
}
} else {
assert(false);
}
}
template class TensorAccessorR<NodeStruct, 1>;
template class TensorAccessorR<EdgeStruct, 1>;
template class TensorAccessorR<DATATYPE, 1>;
template class TensorAccessorR<DATATYPE, 2>;
template class TensorAccessorR<DATATYPE, 3>;
template class TensorAccessorR<int, 2>;
template class TensorAccessorW<DATATYPE, 1>;
template class TensorAccessorW<DATATYPE, 2>;
template class TensorAccessorW<DATATYPE, 3>;
|
4ef4bb88019629c3f332c66c1204a18abc295e20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "transform.h"
__device__ float op(float d1,float *params) {
return fabsf(d1);
}
extern "C"
__global__ void abs_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
transform(n,idx,dy,incy,params,result);
}
| 4ef4bb88019629c3f332c66c1204a18abc295e20.cu | #include "transform.h"
__device__ float op(float d1,float *params) {
return fabsf(d1);
}
extern "C"
__global__ void abs_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
transform(n,idx,dy,incy,params,result);
}
|
34efa7e20129613525c45969fa1714fc539759df.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_A_mul_Bs_64.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int mx = 1;
int ns = 1;
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double *sval = NULL;
hipMalloc(&sval, XSIZE*YSIZE);
int *srow = NULL;
hipMalloc(&srow, XSIZE*YSIZE);
int *scol = NULL;
hipMalloc(&scol, XSIZE*YSIZE);
double *k = NULL;
hipMalloc(&k, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((_A_mul_Bs_64), dim3(gridBlock), dim3(threadBlock), 0, 0, mx, ns, x, sval, srow, scol, k);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((_A_mul_Bs_64), dim3(gridBlock), dim3(threadBlock), 0, 0, mx, ns, x, sval, srow, scol, k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((_A_mul_Bs_64), dim3(gridBlock), dim3(threadBlock), 0, 0, mx, ns, x, sval, srow, scol, k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 34efa7e20129613525c45969fa1714fc539759df.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_A_mul_Bs_64.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int mx = 1;
int ns = 1;
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
double *sval = NULL;
cudaMalloc(&sval, XSIZE*YSIZE);
int *srow = NULL;
cudaMalloc(&srow, XSIZE*YSIZE);
int *scol = NULL;
cudaMalloc(&scol, XSIZE*YSIZE);
double *k = NULL;
cudaMalloc(&k, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_A_mul_Bs_64<<<gridBlock,threadBlock>>>(mx,ns,x,sval,srow,scol,k);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_A_mul_Bs_64<<<gridBlock,threadBlock>>>(mx,ns,x,sval,srow,scol,k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_A_mul_Bs_64<<<gridBlock,threadBlock>>>(mx,ns,x,sval,srow,scol,k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
156fd193ce95bd4a449564d9be4440a6b517d31c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// function to add the elements of two arrays
__global__ // turn this function into a CUDA kernel
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20; // 1M elements
//float *x = new float[N];
//float *y = new float[N];
// Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
hipMallocManaged(&x, N*sizeof(float)); // allocate unified memory
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the CPU
//add(N, x, y);
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
// synchronization
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
//delete [] x;
//delete [] y;
hipFree(x); // free the unified memory
hipFree(y);
return 0;
}
| 156fd193ce95bd4a449564d9be4440a6b517d31c.cu | #include <iostream>
#include <math.h>
// function to add the elements of two arrays
__global__ // turn this function into a CUDA kernel
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
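// Illustrative sketch (not in the original file): a grid-stride variant of the
// kernel above. The launch below uses add<<<1, 1>>>, so a single GPU thread
// walks the whole array; launching this variant as, for example,
//   add_grid_stride<<<(N + 255) / 256, 256>>>(N, x, y);
// processes the elements in parallel. The name add_grid_stride is hypothetical.
__global__
void add_grid_stride(int n, float *x, float *y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}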
int main(void)
{
int N = 1<<20; // 1M elements
//float *x = new float[N];
//float *y = new float[N];
// Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float)); // allocate unified memory
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the CPU
//add(N, x, y);
add<<<1, 1>>>(N, x, y);
// synchronization
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
//delete [] x;
//delete [] y;
cudaFree(x); // free the unified memory
cudaFree(y);
return 0;
}
|
f7bfb463d1baa3437b1dc981bcc4c86549752577.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 1 << 27 )
#define THREADS_PER_BLOCK 256
#define FLOATTYPE_T float
__global__ void sumReduction(int n, FLOATTYPE_T *in, FLOATTYPE_T *sum)
{
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* temp for accumulating result */
FLOATTYPE_T result = 0.0;
/* loop for calculating the result */
for( int i = 0; i < N; i++ )
{
result += in[i];
} /* end for */
/* write the result to global memory */
*sum = result;
return;
}
int main()
{
FLOATTYPE_T *h_in, h_sum, cpu_sum;
FLOATTYPE_T *d_in, *d_sum;
int size = N;
int memBytes = size * sizeof( FLOATTYPE_T );
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( hipMalloc( &d_in, memBytes ) );
checkCUDA( hipMalloc( &d_sum, sizeof(FLOATTYPE_T) ) );
/* allocate space for host copies of in, out and setup input values */
h_in = (FLOATTYPE_T *)malloc( memBytes );
for( int i = 0; i < size; i++ )
{
h_in[i] = FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 );
if( i % 2 == 0 ) h_in[i] = -h_in[i];
}
h_sum = 0.0;
cpu_sum = 0.0;
/* copy inputs to device */
checkCUDA( hipMemcpy( d_in, h_in, memBytes, hipMemcpyHostToDevice ) );
checkCUDA( hipMemset( d_sum, 0, sizeof(FLOATTYPE_T) ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
dim3 blocks( (size / threads.x) + 1, 1, 1);
/* start the timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* launch the kernel on the GPU
Since it's naive we'll only launch one thread total. This is a serial
algorithm */
hipLaunchKernelGGL(( sumReduction), dim3(1), dim3(1) , 0, 0, size, d_in, d_sum );
checkKERNEL()
/* stop the timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print perf info */
printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)*
(double)size * 1.e-9 );
printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* copy result back to host */
checkCUDA( hipMemcpy( &h_sum, d_sum, sizeof(FLOATTYPE_T),
hipMemcpyDeviceToHost ) );
/* start timer for CPU version */
checkCUDA( hipEventRecord( start, 0 ) );
/* run a naive CPU reduction */
for( int i = 0; i < size; i++ )
{
cpu_sum += h_in[i];
} /* end for */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print CPU perf info */
printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* check result for accuracy */
FLOATTYPE_T diff = abs( cpu_sum - h_sum );
if( diff / abs(h_sum) < 0.001 ) printf("PASS\n");
else
{
printf("FAIL\n");
printf("Error is %f\n", diff / h_sum );
} /* end else */
/* clean up */
free(h_in);
checkCUDA( hipFree( d_in ) );
checkCUDA( hipFree( d_sum ) );
checkCUDA( hipDeviceReset() );
return 0;
} /* end main */
| f7bfb463d1baa3437b1dc981bcc4c86549752577.cu | /*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 1 << 27 )
#define THREADS_PER_BLOCK 256
#define FLOATTYPE_T float
__global__ void sumReduction(int n, FLOATTYPE_T *in, FLOATTYPE_T *sum)
{
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* temp for accumulating result */
FLOATTYPE_T result = 0.0;
/* loop for calculating the result */
for( int i = 0; i < N; i++ )
{
result += in[i];
} /* end for */
/* write the result to global memory */
*sum = result;
return;
}
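/* Illustrative sketch (not part of this exercise): one simple way to
   parallelize the reduction above. Each thread accumulates a strided slice of
   the input and atomically adds its partial sum into *sum; the exercise below
   deliberately keeps the serial single-thread launch for comparison. The name
   sumReductionAtomic is hypothetical. */
__global__ void sumReductionAtomic(int n, FLOATTYPE_T *in, FLOATTYPE_T *sum)
{
  int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  FLOATTYPE_T result = 0.0;
  for( int i = globalIndex; i < n; i += stride )
  {
    result += in[i];
  } /* end for */
  atomicAdd( sum, result );
  return;
}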
int main()
{
FLOATTYPE_T *h_in, h_sum, cpu_sum;
FLOATTYPE_T *d_in, *d_sum;
int size = N;
int memBytes = size * sizeof( FLOATTYPE_T );
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( cudaMalloc( &d_in, memBytes ) );
checkCUDA( cudaMalloc( &d_sum, sizeof(FLOATTYPE_T) ) );
/* allocate space for host copies of in, out and setup input values */
h_in = (FLOATTYPE_T *)malloc( memBytes );
for( int i = 0; i < size; i++ )
{
h_in[i] = FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 );
if( i % 2 == 0 ) h_in[i] = -h_in[i];
}
h_sum = 0.0;
cpu_sum = 0.0;
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_in, h_in, memBytes, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemset( d_sum, 0, sizeof(FLOATTYPE_T) ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
dim3 blocks( (size / threads.x) + 1, 1, 1);
/* start the timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* launch the kernel on the GPU
Since it's naive we'll only launch one thread total. This is a serial
algorithm */
sumReduction<<< 1, 1 >>>( size, d_in, d_sum );
checkKERNEL()
/* stop the timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print perf info */
printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)*
(double)size * 1.e-9 );
printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* copy result back to host */
checkCUDA( cudaMemcpy( &h_sum, d_sum, sizeof(FLOATTYPE_T),
cudaMemcpyDeviceToHost ) );
/* start timer for CPU version */
checkCUDA( cudaEventRecord( start, 0 ) );
/* run a naive CPU reduction */
for( int i = 0; i < size; i++ )
{
cpu_sum += h_in[i];
} /* end for */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print CPU perf info */
printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* check result for accuracy */
FLOATTYPE_T diff = abs( cpu_sum - h_sum );
if( diff / abs(h_sum) < 0.001 ) printf("PASS\n");
else
{
printf("FAIL\n");
printf("Error is %f\n", diff / h_sum );
} /* end else */
/* clean up */
free(h_in);
checkCUDA( cudaFree( d_in ) );
checkCUDA( cudaFree( d_sum ) );
checkCUDA( cudaDeviceReset() );
return 0;
} /* end main */
|
634a1998e67d0a258a49d112d876b8d192ecdeeb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <helpers/DebugHelper.h>
#include <loops/legacy_ops.h>
#include <loops/transform_any.h>
#include <system/Environment.h>
#include <system/op_boilerplate.h>
#include <types/types.h>
using namespace simdOps;
template <typename X, typename Z, typename OpType>
SD_KERNEL void transformAnySimple(const void *x, const sd::LongType *xShapeInfo, int xRank, void *params, void *z,
const sd::LongType *zShapeInfo, int zRank, int *allocationPointer,
void *reductionPointer, const sd::LongType *tadShapeInfo,
const sd::LongType *tadOffsets) {
functions::transform::TransformAny<X, Z>::template transformCuda<OpType>(
x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template <typename X, typename Y>
SD_HOST void TransformAny<X, Y>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, const int opNum,
const void *x, const sd::LongType *xShape, int xRank,
void *extraParams, void *z, const sd::LongType *zShape,
int zRank, int *allocationPointer, void *reductionPointer,
const sd::LongType *tadShapeInfo,
const sd::LongType *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateShaped,
PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer,
reductionPointer, tadShapeInfo, tadOffsets),
TRANSFORM_ANY_OPS);
DEBUG_KERNEL(stream, opNum);
}
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void TransformAny<X, Z>::transformCuda(const void *vx, const sd::LongType *xShapeInfo, void *vparams,
void *vz, const sd::LongType *zShapeInfo, int *allocationPointer,
void *vreductionPointer, const sd::LongType *tadShapeInfo,
const sd::LongType *tadOffsets) {
auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
auto params = reinterpret_cast<X *>(vparams);
auto reductionPointer = reinterpret_cast<Z *>(vreductionPointer);
__shared__ sd::LongType xEws;
__shared__ sd::LongType zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ sd::LongType length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
if (xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') {
for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params);
} else {
if (vx == vz) {
for (sd::LongType i = tid; i < length; i += totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
z[xOffset] = OpType::op(x[xOffset], params);
}
} else {
for (sd::LongType i = tid; i < length; i += totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto zOffset = shape::getIndexOffset(i, zShapeInfo);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
};
template <typename X, typename Z>
template <typename OpType>
SD_HOST void TransformAny<X, Z>::intermediateShaped(dim3 launchDims, hipStream_t *stream, const void *x,
const sd::LongType *xShape, int xRank, void *extraParams, void *z,
const sd::LongType *zShape, int zRank, int *allocationPointer,
void *reductionPointer, const sd::LongType *tadShapeInfo,
const sd::LongType *tadOffsets) {
hipLaunchKernelGGL(( transformAnySimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "transformAny(...) failed");
}
BUILD_DOUBLE_TEMPLATE(template class TransformAny, , SD_COMMON_TYPES, SD_COMMON_TYPES);
} // namespace transform
} // namespace functions
| 634a1998e67d0a258a49d112d876b8d192ecdeeb.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <helpers/DebugHelper.h>
#include <loops/legacy_ops.h>
#include <loops/transform_any.h>
#include <system/Environment.h>
#include <system/op_boilerplate.h>
#include <types/types.h>
using namespace simdOps;
template <typename X, typename Z, typename OpType>
SD_KERNEL void transformAnySimple(const void *x, const sd::LongType *xShapeInfo, int xRank, void *params, void *z,
const sd::LongType *zShapeInfo, int zRank, int *allocationPointer,
void *reductionPointer, const sd::LongType *tadShapeInfo,
const sd::LongType *tadOffsets) {
functions::transform::TransformAny<X, Z>::template transformCuda<OpType>(
x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template <typename X, typename Y>
SD_HOST void TransformAny<X, Y>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, const int opNum,
const void *x, const sd::LongType *xShape, int xRank,
void *extraParams, void *z, const sd::LongType *zShape,
int zRank, int *allocationPointer, void *reductionPointer,
const sd::LongType *tadShapeInfo,
const sd::LongType *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateShaped,
PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer,
reductionPointer, tadShapeInfo, tadOffsets),
TRANSFORM_ANY_OPS);
DEBUG_KERNEL(stream, opNum);
}
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void TransformAny<X, Z>::transformCuda(const void *vx, const sd::LongType *xShapeInfo, void *vparams,
void *vz, const sd::LongType *zShapeInfo, int *allocationPointer,
void *vreductionPointer, const sd::LongType *tadShapeInfo,
const sd::LongType *tadOffsets) {
auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
auto params = reinterpret_cast<X *>(vparams);
auto reductionPointer = reinterpret_cast<Z *>(vreductionPointer);
__shared__ sd::LongType xEws;
__shared__ sd::LongType zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ sd::LongType length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
if (xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') {
for (int i = tid; i < length; i += totalThreads) z[i * zEws] = OpType::op(x[i * xEws], params);
} else {
if (vx == vz) {
for (sd::LongType i = tid; i < length; i += totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
z[xOffset] = OpType::op(x[xOffset], params);
}
} else {
for (sd::LongType i = tid; i < length; i += totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto zOffset = shape::getIndexOffset(i, zShapeInfo);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
};
template <typename X, typename Z>
template <typename OpType>
SD_HOST void TransformAny<X, Z>::intermediateShaped(dim3 launchDims, cudaStream_t *stream, const void *x,
const sd::LongType *xShape, int xRank, void *extraParams, void *z,
const sd::LongType *zShape, int zRank, int *allocationPointer,
void *reductionPointer, const sd::LongType *tadShapeInfo,
const sd::LongType *tadOffsets) {
transformAnySimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "transformAny(...) failed");
}
BUILD_DOUBLE_TEMPLATE(template class TransformAny, , SD_COMMON_TYPES, SD_COMMON_TYPES);
} // namespace transform
} // namespace functions
|
708027f864914f8905304524c3bdee0aeb530f49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <rocblas.h>
#include <cstring>
#include "cutlass_unit_test.h"
#include "tools/util/half.h"
#include "tools/util/host_matrix.h"
#include "tools/util/tensor_view_io.h"
#include "cutlass/gemm/volta884_multiplicand.h"
#include "cutlass/gemm/volta884_multiply_add.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
#if CUTLASS_ENABLE_TENSOR_CORE_MMA
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Simplified GEMM: computes one threadblock-scoped matrix product.
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to verify that a tile of data loaded from GMEM, stored to SMEM, and loaded into RF computes
/// the expected mma.sync product
template <
typename MultiplicandA,
typename MultiplicandB,
typename ScalarC
>
__global__ void test_volta884_matrix_product(
typename MultiplicandA::LoadIterator::Params load_A_params,
typename MultiplicandB::LoadIterator::Params load_B_params,
float *C,
int ldc,
int active_k_idx) {
// Define thread-block scoped load iterators
typename MultiplicandA::LoadIterator load_A_iterator(load_A_params);
typename MultiplicandB::LoadIterator load_B_iterator(load_B_params);
// Define shared memory buffers
static int const kSmemAElements =
cutlass::ShapeCount<typename MultiplicandA::StoreIterator::OperandShape>::kCount;
static int const kSmemBElements =
cutlass::ShapeCount<typename MultiplicandB::StoreIterator::OperandShape>::kCount;
__shared__ uint16_t smem_A_buffer[kSmemAElements];
__shared__ uint16_t smem_B_buffer[kSmemBElements];
// Instantiate thread-block-scoped store iterators
typename MultiplicandA::StoreIterator::Params store_A_params(reinterpret_cast<half *>(&smem_A_buffer[0]));
typename MultiplicandB::StoreIterator::Params store_B_params(reinterpret_cast<half *>(&smem_B_buffer[0]));
typename MultiplicandA::StoreIterator store_A_iterator(store_A_params);
typename MultiplicandB::StoreIterator store_B_iterator(store_B_params);
// Load thread-block scoped fragments
typename MultiplicandA::LoadIterator::Fragment threadblock_A_frag;
typename MultiplicandB::LoadIterator::Fragment threadblock_B_frag;
__syncthreads();
// A operand
load_A_iterator.load(threadblock_A_frag);
store_A_iterator.store(threadblock_A_frag);
// Barrier to enforce SMEM consistency
__syncthreads();
// B operand
load_B_iterator.load(threadblock_B_frag);
store_B_iterator.store(threadblock_B_frag);
// Barrier to enforce SMEM consistency
__syncthreads();
// Instantiate warp-scoped load iterators
typename MultiplicandA::WarpLoadIterator::Params warp_A_params(reinterpret_cast<half const *>(&smem_A_buffer[0]));
typename MultiplicandB::WarpLoadIterator::Params warp_B_params(reinterpret_cast<half const *>(&smem_B_buffer[0]));
typename MultiplicandA::WarpLoadIterator warp_load_A(warp_A_params);
typename MultiplicandB::WarpLoadIterator warp_load_B(warp_B_params);
// Instantiate a multiply-add object specialized for Volta mma.sync
typedef cutlass::gemm::Volta884MultiplyAdd<
typename MultiplicandA::WarpTile,
MultiplicandA::kLayout,
half,
MultiplicandB::kLayout,
half,
ScalarC
> MultiplyAdd;
typedef cutlass::gemm::Volta884NaiveEpilogue<
ScalarC,
typename MultiplicandA::WarpDelta,
typename MultiplyAdd::Iterations
> NaiveEpilogue;
MultiplyAdd multiply_add;
NaiveEpilogue epilogue(C, ldc);
// Initialize accumulator fragment
typename MultiplyAdd::Accumulators accumulators;
for (int i = 0; i < MultiplyAdd::Accumulators::kElements; ++i) {
accumulators[i] = threadIdx.x;
}
epilogue.clear(accumulators);
// Iterate over the K dimension of the threadblock tile
#pragma unroll
for (int k_idx = 0; k_idx < MultiplicandA::Tile::kD / MultiplyAdd::WarpTile::kD; ++k_idx) {
if (active_k_idx < 0 || active_k_idx == k_idx) {
typename MultiplicandA::WarpLoadIterator::Fragment warp_A_frag;
typename MultiplicandB::WarpLoadIterator::Fragment warp_B_frag;
// Load warp-scoped fragments
warp_load_A.load(warp_A_frag, cutlass::make_Coord(k_idx, 0, 0, 0));
warp_load_B.load(warp_B_frag, cutlass::make_Coord(k_idx, 0, 0, 0));
// Compute accumulated matrix product
multiply_add.multiply_add(warp_A_frag, warp_B_frag, accumulators, accumulators);
}
}
// Store accumulator tile
epilogue.store(accumulators);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Identifies multiplicand of GEMM (A or B)
cutlass::MatrixLayout::Kind LayoutA,
/// Specifies layout of data in source memory
cutlass::MatrixLayout::Kind LayoutB,
/// Accumulator type
typename ScalarC,
/// Specifies threadblock tile shape
typename Tile,
/// Specifies the warp tile shape
typename WarpTile,
/// Specifies the number of participating warps
int WarpCount,
/// Specifies the delta between warp accesses along the outer dimension
typename WarpDelta
>
struct Volta884MatrixProductTestbed {
//
// Type definitions
//
typedef cutlass::gemm::Volta884Multiplicand<
cutlass::GemmOperand::kA,
LayoutA,
Tile,
WarpTile,
WarpCount,
WarpDelta> MultiplicandA;
typedef cutlass::gemm::Volta884Multiplicand<
cutlass::GemmOperand::kB,
LayoutB,
Tile,
WarpTile,
WarpCount,
WarpDelta> MultiplicandB;
/// Generates random elements
template <typename T>
struct RandomGenerator {
RandomGenerator(
int seed = -1
) { srand(seed); }
T operator()() {
int val = (rand() % 29) - 13;
return T(val);
}
};
/// Depth of an mma.sync instruction
static int const kWarpK = 4;
//
// Data members
//
cutlass::HostMatrix<cutlass::half_t> tensor_A;
cutlass::HostMatrix<cutlass::half_t> tensor_B;
cutlass::HostMatrix<ScalarC> tensor_C;
cutlass::HostMatrix<ScalarC> tensor_Ref;
//
// Methods
//
Volta884MatrixProductTestbed() {
tensor_A.resize(cutlass::make_Coord(Tile::kW, Tile::kD), LayoutA);
tensor_B.resize(cutlass::make_Coord(Tile::kD, Tile::kH), LayoutB);
tensor_C.resize(cutlass::make_Coord(Tile::kW, Tile::kH), cutlass::MatrixLayout::kColumnMajor);
tensor_Ref.resize(cutlass::make_Coord(Tile::kW, Tile::kH), cutlass::MatrixLayout::kColumnMajor);
}
/// Runs a test case
bool run_once(int seed, int active_k_idx = -1) {
#if 0
// For debugging, it helps to see sequential elements
tensor_A.fill_sequential();
tensor_B.fill_identity();
#else
// Fill with random elements
tensor_A.fill_random(RandomGenerator<cutlass::half_t>(seed + 53));
tensor_B.fill_random(RandomGenerator<cutlass::half_t>(seed + 97));
#endif
if (active_k_idx >= 0) {
// overwrite all but the active k index with zeros
int const m_stride = (LayoutA == cutlass::MatrixLayout::kRowMajor ? Tile::kD : 1);
int const a_k_stride = (LayoutA == cutlass::MatrixLayout::kRowMajor ? 1 : Tile::kW);
int const n_stride = (LayoutB == cutlass::MatrixLayout::kRowMajor ? 1 : Tile::kD);
int const b_k_stride = (LayoutB == cutlass::MatrixLayout::kRowMajor ? Tile::kH : 1);
for (int k_idx = 0; k_idx < Tile::kD / kWarpK; ++k_idx) {
if (active_k_idx != k_idx) {
for (int k = 0; k < kWarpK; ++k) {
for (int m = 0; m < Tile::kW; ++m) {
tensor_A.host_data()[m_stride * m + a_k_stride * (k_idx * kWarpK + k)] = 0;
}
for (int n = 0; n < Tile::kH; ++n) {
tensor_B.host_data()[n_stride * n + b_k_stride * (k_idx * kWarpK + k)] = 0;
}
}
}
}
}
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.fill(ScalarC(0));
tensor_Ref.fill(ScalarC(0));
tensor_C.sync_device();
// run kernel
dim3 grid(1, 1);
dim3 block(32 * WarpCount, 1, 1);
typename MultiplicandA::LoadIterator::Params load_A_params(
tensor_A.device_data(),
tensor_A.leading_dim() * 8,
tensor_A.leading_dim(),
8
);
typename MultiplicandB::LoadIterator::Params load_B_params(
tensor_B.device_data(),
tensor_B.leading_dim() * 8,
tensor_B.leading_dim(),
8
);
hipLaunchKernelGGL(( test_volta884_matrix_product<MultiplicandA, MultiplicandB, ScalarC>), dim3(grid), dim3(block) , 0, 0,
load_A_params,
load_B_params,
tensor_C.device_data(),
tensor_C.leading_dim(),
active_k_idx
);
EXPECT_EQ(hipDeviceSynchronize(), hipSuccess);
// Copy to host
tensor_C.sync_host();
// Compute reference
cutlass::reference::host::Gemm(
cutlass::gemm::GemmCoord(
tensor_A.size().column(),
tensor_Ref.size().column(),
tensor_Ref.size().row()),
ScalarC(1),
tensor_A,
tensor_B,
ScalarC(0),
tensor_Ref,
ScalarC(0));
// Assert bit-level equivalence
bool passed = tensor_Ref.bit_equals(tensor_C);
EXPECT_TRUE(passed)
<< "Incorrect matrix product\n"
<< "A =\n" << tensor_A
<< "\nB =\n" << tensor_B
<< "\nRef =\n" << tensor_Ref
<< "\nMMA=\n" << tensor_C;
return passed;
}
/// Executes a set of test cases containing unique, randomly chosen matrices and verifies
/// bit equivalence with the reference implementation.
bool run(int test_count = 16) {
bool passed = true;
#if 1
// Run several tests with deterministic seeds
for (int i = 0; i < test_count && passed; ++i) {
passed = run_once(i * 41 + i * 17);
}
#else
// For debugging, run the full matrix product with exactly one K-index non-zero
for (int k_idx = 0; passed && k_idx < Tile::kD / kWarpK; ++k_idx) {
passed = run_once(17, k_idx);
if (!passed) {
std::cout << "Failed on k_idx = " << k_idx
<< " [" << k_idx * kWarpK << ".." << (k_idx + 1) * kWarpK - 1 << "]" << std::endl;
}
}
#endif
return passed;
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// 64x64x32, 128x64x32, 64x128x32, 128x128x32, 256x128x32, 128x256x32, 64x64x128
//
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Congruous loading
//
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x64x32_32x32x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<4, 32, 32>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 128x64x32_64x32x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<4, 32, 64>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x128x32_32x64x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<4, 64, 32>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x64x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<4, 64, 64>,
1,
cutlass::Shape<1, 1, 1, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x64x128) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<128, 64, 64>,
cutlass::Shape<4, 64, 64>,
1,
cutlass::Shape<1, 1, 1, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 128x64x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<4, 64, 64>,
2,
cutlass::Shape<1, 1, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<4, 64, 64>,
2,
cutlass::Shape<1, 2, 1, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 128x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<4, 64, 64>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 256x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 128, 256>,
cutlass::Shape<4, 64, 64>,
8,
cutlass::Shape<1, 2, 4, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 128x256x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 256, 128>,
cutlass::Shape<4, 64, 64>,
8,
cutlass::Shape<1, 4, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Crosswise loading
//
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 64x64x32_32x32x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<4, 32, 32>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 128x64x32_64x32x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<4, 32, 64>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 64x128x32_32x64x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<4, 64, 32>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 64x64x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<4, 64, 64>,
1,
cutlass::Shape<1, 1, 1, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 128x64x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<4, 64, 64>,
2,
cutlass::Shape<1, 1, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 128x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<4, 64, 64>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 256x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 128, 256>,
cutlass::Shape<4, 64, 64>,
8,
cutlass::Shape<1, 2, 4, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 128x256x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 256, 128>,
cutlass::Shape<4, 64, 64>,
8,
cutlass::Shape<1, 4, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ENABLE_TENSOR_CORE_MMA)
| 708027f864914f8905304524c3bdee0aeb530f49.cu | /***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cublas_v2.h>
#include <cstring>
#include "cutlass_unit_test.h"
#include "tools/util/half.h"
#include "tools/util/host_matrix.h"
#include "tools/util/tensor_view_io.h"
#include "cutlass/gemm/volta884_multiplicand.h"
#include "cutlass/gemm/volta884_multiply_add.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
#if CUTLASS_ENABLE_TENSOR_CORE_MMA
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Simplified GEMM: computes one threadblock-scoped matrix product.
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to verify a tile of data loaded from GMEM, stored to SMEM, and loaded into RF computes
/// the expected mma.sync product
template <
typename MultiplicandA,
typename MultiplicandB,
typename ScalarC
>
__global__ void test_volta884_matrix_product(
typename MultiplicandA::LoadIterator::Params load_A_params,
typename MultiplicandB::LoadIterator::Params load_B_params,
float *C,
int ldc,
int active_k_idx) {
// Define thread-block scoped load iterators
typename MultiplicandA::LoadIterator load_A_iterator(load_A_params);
typename MultiplicandB::LoadIterator load_B_iterator(load_B_params);
// Define shared memory buffers
static int const kSmemAElements =
cutlass::ShapeCount<typename MultiplicandA::StoreIterator::OperandShape>::kCount;
static int const kSmemBElements =
cutlass::ShapeCount<typename MultiplicandB::StoreIterator::OperandShape>::kCount;
__shared__ uint16_t smem_A_buffer[kSmemAElements];
__shared__ uint16_t smem_B_buffer[kSmemBElements];
// Instantiate thread-block-scoped store iterators
typename MultiplicandA::StoreIterator::Params store_A_params(reinterpret_cast<half *>(&smem_A_buffer[0]));
typename MultiplicandB::StoreIterator::Params store_B_params(reinterpret_cast<half *>(&smem_B_buffer[0]));
typename MultiplicandA::StoreIterator store_A_iterator(store_A_params);
typename MultiplicandB::StoreIterator store_B_iterator(store_B_params);
// Load thread-block scoped fragments
typename MultiplicandA::LoadIterator::Fragment threadblock_A_frag;
typename MultiplicandB::LoadIterator::Fragment threadblock_B_frag;
__syncthreads();
// A operand
load_A_iterator.load(threadblock_A_frag);
store_A_iterator.store(threadblock_A_frag);
// Barrier to enforce SMEM consistency
__syncthreads();
// B operand
load_B_iterator.load(threadblock_B_frag);
store_B_iterator.store(threadblock_B_frag);
// Barrier to enforce SMEM consistency
__syncthreads();
// Instantiate warp-scoped load iterators
typename MultiplicandA::WarpLoadIterator::Params warp_A_params(reinterpret_cast<half const *>(&smem_A_buffer[0]));
typename MultiplicandB::WarpLoadIterator::Params warp_B_params(reinterpret_cast<half const *>(&smem_B_buffer[0]));
typename MultiplicandA::WarpLoadIterator warp_load_A(warp_A_params);
typename MultiplicandB::WarpLoadIterator warp_load_B(warp_B_params);
// Instantiate a multiply-add object specialized for Volta mma.sync
typedef cutlass::gemm::Volta884MultiplyAdd<
typename MultiplicandA::WarpTile,
MultiplicandA::kLayout,
half,
MultiplicandB::kLayout,
half,
ScalarC
> MultiplyAdd;
typedef cutlass::gemm::Volta884NaiveEpilogue<
ScalarC,
typename MultiplicandA::WarpDelta,
typename MultiplyAdd::Iterations
> NaiveEpilogue;
MultiplyAdd multiply_add;
NaiveEpilogue epilogue(C, ldc);
// Initialize accumulator fragment
typename MultiplyAdd::Accumulators accumulators;
for (int i = 0; i < MultiplyAdd::Accumulators::kElements; ++i) {
accumulators[i] = threadIdx.x;
}
epilogue.clear(accumulators);
// Iterate over the K dimension of the threadblock tile
#pragma unroll
for (int k_idx = 0; k_idx < MultiplicandA::Tile::kD / MultiplyAdd::WarpTile::kD; ++k_idx) {
if (active_k_idx < 0 || active_k_idx == k_idx) {
typename MultiplicandA::WarpLoadIterator::Fragment warp_A_frag;
typename MultiplicandB::WarpLoadIterator::Fragment warp_B_frag;
// Load warp-scoped fragments
warp_load_A.load(warp_A_frag, cutlass::make_Coord(k_idx, 0, 0, 0));
warp_load_B.load(warp_B_frag, cutlass::make_Coord(k_idx, 0, 0, 0));
// Compute accumulated matrix product
multiply_add.multiply_add(warp_A_frag, warp_B_frag, accumulators, accumulators);
}
}
// Store accumulator tile
epilogue.store(accumulators);
}
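// Note on active_k_idx: a negative value accumulates over every K-slice of the threadblock tile
// (the normal path), while a non-negative value restricts the loop above to a single mma.sync
// step so that the testbed below can isolate a failure to one K-slice.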
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Identifies multiplicand of GEMM (A or B)
cutlass::MatrixLayout::Kind LayoutA,
/// Specifies layout of data in source memory
cutlass::MatrixLayout::Kind LayoutB,
/// Accumulator type
typename ScalarC,
/// Specifies threadblock tile shape
typename Tile,
/// Specifies the warp tile shape
typename WarpTile,
/// Specifies the number of participating warps
int WarpCount,
/// Specifies the delta between warp accesses along the outer dimension
typename WarpDelta
>
struct Volta884MatrixProductTestbed {
//
// Type definitions
//
typedef cutlass::gemm::Volta884Multiplicand<
cutlass::GemmOperand::kA,
LayoutA,
Tile,
WarpTile,
WarpCount,
WarpDelta> MultiplicandA;
typedef cutlass::gemm::Volta884Multiplicand<
cutlass::GemmOperand::kB,
LayoutB,
Tile,
WarpTile,
WarpCount,
WarpDelta> MultiplicandB;
/// Generates random elements
template <typename T>
struct RandomGenerator {
RandomGenerator(
int seed = -1
) { srand(seed); }
T operator()() {
int val = (rand() % 29) - 13;
return T(val);
}
};
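    /// Note: the generated values are small integers in [-13, 15]; with half-precision operands
    /// this presumably keeps every product and partial sum exactly representable in the float
    /// accumulators, which is what makes the bit-level comparison against the host reference
    /// meaningful.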
/// Depth of an mma.sync instruction
static int const kWarpK = 4;
//
// Data members
//
cutlass::HostMatrix<cutlass::half_t> tensor_A;
cutlass::HostMatrix<cutlass::half_t> tensor_B;
cutlass::HostMatrix<ScalarC> tensor_C;
cutlass::HostMatrix<ScalarC> tensor_Ref;
//
// Methods
//
Volta884MatrixProductTestbed() {
tensor_A.resize(cutlass::make_Coord(Tile::kW, Tile::kD), LayoutA);
tensor_B.resize(cutlass::make_Coord(Tile::kD, Tile::kH), LayoutB);
tensor_C.resize(cutlass::make_Coord(Tile::kW, Tile::kH), cutlass::MatrixLayout::kColumnMajor);
tensor_Ref.resize(cutlass::make_Coord(Tile::kW, Tile::kH), cutlass::MatrixLayout::kColumnMajor);
}
/// Runs a test case
bool run_once(int seed, int active_k_idx = -1) {
#if 0
// For debugging, it helps to see sequential elements
tensor_A.fill_sequential();
tensor_B.fill_identity();
#else
// Fill with random elements
tensor_A.fill_random(RandomGenerator<cutlass::half_t>(seed + 53));
tensor_B.fill_random(RandomGenerator<cutlass::half_t>(seed + 97));
#endif
if (active_k_idx >= 0) {
// overwrite all but the active k index with zeros
int const m_stride = (LayoutA == cutlass::MatrixLayout::kRowMajor ? Tile::kD : 1);
int const a_k_stride = (LayoutA == cutlass::MatrixLayout::kRowMajor ? 1 : Tile::kW);
int const n_stride = (LayoutB == cutlass::MatrixLayout::kRowMajor ? 1 : Tile::kD);
int const b_k_stride = (LayoutB == cutlass::MatrixLayout::kRowMajor ? Tile::kH : 1);
for (int k_idx = 0; k_idx < Tile::kD / kWarpK; ++k_idx) {
if (active_k_idx != k_idx) {
for (int k = 0; k < kWarpK; ++k) {
for (int m = 0; m < Tile::kW; ++m) {
tensor_A.host_data()[m_stride * m + a_k_stride * (k_idx * kWarpK + k)] = 0;
}
for (int n = 0; n < Tile::kH; ++n) {
tensor_B.host_data()[n_stride * n + b_k_stride * (k_idx * kWarpK + k)] = 0;
}
}
}
}
}
tensor_A.sync_device();
tensor_B.sync_device();
tensor_C.fill(ScalarC(0));
tensor_Ref.fill(ScalarC(0));
tensor_C.sync_device();
// run kernel
dim3 grid(1, 1);
dim3 block(32 * WarpCount, 1, 1);
typename MultiplicandA::LoadIterator::Params load_A_params(
tensor_A.device_data(),
tensor_A.leading_dim() * 8,
tensor_A.leading_dim(),
8
);
typename MultiplicandB::LoadIterator::Params load_B_params(
tensor_B.device_data(),
tensor_B.leading_dim() * 8,
tensor_B.leading_dim(),
8
);
test_volta884_matrix_product<MultiplicandA, MultiplicandB, ScalarC><<< grid, block >>>(
load_A_params,
load_B_params,
tensor_C.device_data(),
tensor_C.leading_dim(),
active_k_idx
);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
// Copy to host
tensor_C.sync_host();
// Compute reference
cutlass::reference::host::Gemm(
cutlass::gemm::GemmCoord(
tensor_A.size().column(),
tensor_Ref.size().column(),
tensor_Ref.size().row()),
ScalarC(1),
tensor_A,
tensor_B,
ScalarC(0),
tensor_Ref,
ScalarC(0));
// Assert bit-level equivalence
bool passed = tensor_Ref.bit_equals(tensor_C);
EXPECT_TRUE(passed)
<< "Incorrect matrix product\n"
<< "A =\n" << tensor_A
<< "\nB =\n" << tensor_B
<< "\nRef =\n" << tensor_Ref
<< "\nMMA=\n" << tensor_C;
return passed;
}
/// Executes a set of test cases containing unique, randomly chosen matrices and verifies
/// bit equivalence with the reference implementation.
bool run(int test_count = 16) {
bool passed = true;
#if 1
// Run several tests with deterministic seeds
for (int i = 0; i < test_count && passed; ++i) {
passed = run_once(i * 41 + i * 17);
}
#else
// For debugging, run the full matrix product with exactly one K-index non-zero
for (int k_idx = 0; passed && k_idx < Tile::kD / kWarpK; ++k_idx) {
passed = run_once(17, k_idx);
if (!passed) {
std::cout << "Failed on k_idx = " << k_idx
<< " [" << k_idx * kWarpK << ".." << (k_idx + 1) * kWarpK - 1 << "]" << std::endl;
}
}
#endif
return passed;
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// 64x64x32, 128x64x32, 64x128x32, 128x128x32, 256x128x32, 128x256x32, 64x64x128
//
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Congruous loading
//
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x64x32_32x32x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<4, 32, 32>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 128x64x32_64x32x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<4, 32, 64>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x128x32_32x64x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<4, 64, 32>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x64x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<4, 64, 64>,
1,
cutlass::Shape<1, 1, 1, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x64x128) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<128, 64, 64>,
cutlass::Shape<4, 64, 64>,
1,
cutlass::Shape<1, 1, 1, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 128x64x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<4, 64, 64>,
2,
cutlass::Shape<1, 1, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 64x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<4, 64, 64>,
2,
cutlass::Shape<1, 2, 1, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 128x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<4, 64, 64>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 256x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 128, 256>,
cutlass::Shape<4, 64, 64>,
8,
cutlass::Shape<1, 2, 4, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_nt, 128x256x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
float,
cutlass::Shape<32, 256, 128>,
cutlass::Shape<4, 64, 64>,
8,
cutlass::Shape<1, 4, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Crosswise loading
//
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 64x64x32_32x32x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<4, 32, 32>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 128x64x32_64x32x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<4, 32, 64>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 64x128x32_32x64x4) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<4, 64, 32>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 64x64x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<4, 64, 64>,
1,
cutlass::Shape<1, 1, 1, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 128x64x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<4, 64, 64>,
2,
cutlass::Shape<1, 1, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 128x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<4, 64, 64>,
4,
cutlass::Shape<1, 2, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 256x128x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 128, 256>,
cutlass::Shape<4, 64, 64>,
8,
cutlass::Shape<1, 2, 4, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(volta884_matrix_product_tn, 128x256x32) {
Volta884MatrixProductTestbed<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
float,
cutlass::Shape<32, 256, 128>,
cutlass::Shape<4, 64, 64>,
8,
cutlass::Shape<1, 4, 2, 1>
> testbed;
EXPECT_TRUE(testbed.run());
}
////////////////////////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ENABLE_TENSOR_CORE_MMA)
|
f97566606f475f8f8e3f452b1abfdafc33aa37cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bvh.cuh"
#include "tri_contact.cuh"
__host__ __device__ Box::Box(const BVH& bvh, const Face& face)
{
Box box(bvh.nodes[face.nid[0]]);
box.update(bvh.nodes[face.nid[1]]);
box.update(bvh.nodes[face.nid[2]]);
*this = box;
}
void Box::update(const Box& box_j)
{
x[0] = min(x[0], box_j.x[0]);
y[0] = min(y[0], box_j.y[0]);
z[0] = min(z[0], box_j.z[0]);
x[1] = max(x[1], box_j.x[1]);
y[1] = max(y[1], box_j.y[1]);
z[1] = max(z[1], box_j.z[1]);
}
void Box::update(const Node& node_j)
{
x[0] = min(x[0], node_j.x);
y[0] = min(y[0], node_j.y);
z[0] = min(z[0], node_j.z);
x[1] = max(x[1], node_j.x);
y[1] = max(y[1], node_j.y);
z[1] = max(z[1], node_j.z);
}
const int bitlimit = 40;
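// Note: morton3D below is not the classic bit-interleaving formulation. It derives a bitlimit-bit
// code by repeatedly bisecting the longest axis of the scene bounding box and recording which half
// contains the centroid; nearby centroids still receive nearby codes, which is all the subsequent
// LBVH construction needs.
//
// For reference only (not used here), a minimal sketch of the classic 30-bit interleave from the
// NVIDIA blog post cited further down, assuming coordinates pre-scaled to the unit cube:
//
// __device__ unsigned int expandBits(unsigned int v)
// {
//     v = (v * 0x00010001u) & 0xFF0000FFu;
//     v = (v * 0x00000101u) & 0x0F00F00Fu;
//     v = (v * 0x00000011u) & 0xC30C30C3u;
//     v = (v * 0x00000005u) & 0x49249249u;
//     return v;
// }
// __device__ unsigned int morton3D_classic(float x, float y, float z)
// {
//     unsigned int xx = expandBits((unsigned int)min(max(x * 1024.0f, 0.0f), 1023.0f));
//     unsigned int yy = expandBits((unsigned int)min(max(y * 1024.0f, 0.0f), 1023.0f));
//     unsigned int zz = expandBits((unsigned int)min(max(z * 1024.0f, 0.0f), 1023.0f));
//     return xx * 4 + yy * 2 + zz;
// }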
__device__ ulong morton3D(const Node& node, const Box& mbox)
{
Node boxnode(mbox.x[1] - mbox.x[0], mbox.y[1] - mbox.y[0], mbox.z[1] - mbox.z[0]);
Node newnode(node.x - mbox.x[0], node.y - mbox.y[0], node.z - mbox.z[0]);
ulong mtcd = 0;
for (int i = 0; i < bitlimit; i++) {
int ax = 0;
for (int j = 1; j < 3; j++) {
if (boxnode.c[j] > boxnode.c[ax])
ax = j;
}
boxnode.c[ax] /= 2;
mtcd = mtcd << 1;
if (newnode.c[ax] > boxnode.c[ax]) {
newnode.c[ax] -= boxnode.c[ax];
mtcd = mtcd | 1;
}
}
return mtcd;
}
__device__ Node get_barycentric_coords(const BVH& bvh, const Face& face)
{
Node node(0.0, 0.0, 0.0);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
const Node& foo_node = bvh.nodes[face.nid[j]];
node.c[i] += foo_node.c[i];
}
node.c[i] /= 3;
}
return node;
}
// Morton code
// https://developer.nvidia.com/blog/thinking-parallel-part-iii-tree-construction-gpu/
__global__ void morton_code(BVH bvh, Box mbox)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= bvh.nface)
return;
Node cnode = get_barycentric_coords(bvh, bvh.faces[tid]);
bvh.mtcode[tid] = morton3D(cnode, mbox);
}
__device__ inline int pfx(ulong a, ulong b)
{
return __clzll(a ^ b);
}
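// pfx(a, b) is the "delta" operator from the Karras construction referenced below: the length of
// the common leading bit prefix of two Morton codes, computed as count-leading-zeros of their XOR.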
__device__ inline int sign(int a)
{
return a > 0 ? 1 : (a < 0 ? -1 : 0);
}
__device__ inline bool inside(int a, int b, int c)
{
return a >= b && a <= c;
}
// Magical parallel tree building
// https://developer.nvidia.com/blog/parallelforall/wp-content/uploads/2012/11/karras2012hpg_paper.pdf
__global__ void generate_tree(BVH bvh)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= bvh.nface - 1)
return;
auto mtcode = bvh.mtcode;
int d = tid == 0 ? 1 : sign(pfx(mtcode[tid], mtcode[tid + 1]) - pfx(mtcode[tid], mtcode[tid - 1]));
int minpfx = tid == 0 ? -1 : pfx(mtcode[tid], mtcode[tid - d]);
int lmax = 2, ci, cj;
ci = cj = tid;
while (inside(ci + d * lmax, 0, bvh.nface - 1) && pfx(mtcode[ci], mtcode[ci + d * lmax]) > minpfx)
lmax *= 2;
lmax /= 2;
while (lmax > 0) {
if (inside(cj + d * lmax, 0, bvh.nface - 1) && pfx(mtcode[ci], mtcode[cj + d * lmax]) > minpfx)
cj += d * lmax;
lmax /= 2;
}
minpfx = pfx(mtcode[ci], mtcode[cj]);
int ck, L, R, mid;
L = ci, R = cj + d;
while (1 < d * (R - L)) {
mid = (L + R) / 2;
if (pfx(mtcode[ci], mtcode[mid]) > minpfx)
L = mid;
else
R = mid;
}
ck = L;
int offset = bvh.nface;
ck += min(d, 0);
auto tnodes = bvh.tnodes;
tnodes[offset + tid].isleaf = false;
tnodes[offset + tid].ci = min(ci, cj);
tnodes[offset + tid].cj = max(ci, cj);
if (min(ci, cj) == ck) {
BVHnode tnode(true, ck, -1, -1, offset + tid);
tnode.bbox = Box(bvh, bvh.faces[ck]);
tnodes[ck] = tnode;
tnodes[offset + tid].lchild = ck;
} else {
tnodes[offset + tid].lchild = offset + ck;
tnodes[offset + ck].parent = offset + tid;
}
if (max(ci, cj) == ck + 1) {
BVHnode tnode(true, ck + 1, -1, -1, offset + tid);
tnode.bbox = Box(bvh, bvh.faces[ck + 1]);
tnodes[ck + 1] = tnode;
tnodes[offset + tid].rchild = ck + 1;
} else {
tnodes[offset + tid].rchild = offset + ck + 1;
tnodes[offset + ck + 1].parent = offset + tid;
}
if (tid == 0)
tnodes[offset + tid].parent = -1;
// bounding box calculation
// leafid = 0 means that node has not been visited yet
tnodes[offset + tid].leafid = 0;
}
__device__ inline void atomic_exch_tnode(Box &boxA, Box &boxB)
{
    // Warning!!!! This only works on machines where sizeof(double) equals sizeof(unsigned long long int),
    // because there is no atomic operation implemented for 'double'.
atomicExch((unsigned long long int*)&(boxA.x[0]), *((unsigned long long int*)&boxB.x[0]));
atomicExch((unsigned long long int*)&(boxA.y[0]), *((unsigned long long int*)&boxB.y[0]));
atomicExch((unsigned long long int*)&(boxA.z[0]), *((unsigned long long int*)&boxB.z[0]));
atomicExch((unsigned long long int*)&(boxA.x[1]), *((unsigned long long int*)&boxB.x[1]));
atomicExch((unsigned long long int*)&(boxA.y[1]), *((unsigned long long int*)&boxB.y[1]));
atomicExch((unsigned long long int*)&(boxA.z[1]), *((unsigned long long int*)&boxB.z[1]));
}
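// Bottom-up AABB propagation: every leaf walks toward the root. At each internal node the first
// thread to arrive sets the visit flag and stops; the second thread (whose sibling subtree is by
// then complete) merges the two child boxes and continues upward, so each internal node is filled
// exactly once and only after both of its children are ready.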
__global__ void get_tree_bbox(BVH bvh)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= bvh.nface)
return;
// begin with leaf node
BVHnode* tnodes = bvh.tnodes;
int pa = tnodes[tid].parent;
Box box = tnodes[tid].bbox;
while (pa >= 0) {
int flag = atomicExch(&(tnodes[pa].leafid), 1);
if (flag == 0)
break;
int lc = tnodes[pa].lchild;
int rc = tnodes[pa].rchild;
box.update(tnodes[lc].bbox);
box.update(tnodes[rc].bbox);
// Use atomic to ensure data is written to global memory, not cache
atomic_exch_tnode(tnodes[pa].bbox, box);
pa = tnodes[pa].parent;
}
}
__device__ bool detect_face_colli(int a, int b, const BVH& bvh)
{
Face& face_a = bvh.faces[a];
Face& face_b = bvh.faces[b];
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++)
if (face_a.nid[i] == face_b.nid[j])
return false;
vec3f v1(bvh.nodes[face_a.nid[0]].c);
vec3f v2(bvh.nodes[face_a.nid[1]].c);
vec3f v3(bvh.nodes[face_a.nid[2]].c);
vec3f v4(bvh.nodes[face_b.nid[0]].c);
vec3f v5(bvh.nodes[face_b.nid[1]].c);
vec3f v6(bvh.nodes[face_b.nid[2]].c);
return tri_contact(v1, v2, v3, v4, v5, v6);
}
__device__ bool detect_box_colli(const Box& box_i, const Box& box_j)
{
if (box_i.x[1] < box_j.x[0] || box_i.y[1] < box_j.y[0] || box_i.z[1] < box_j.z[0])
return false;
    if (box_i.x[0] > box_j.x[1] || box_i.y[0] > box_j.y[1] || box_i.z[0] > box_j.z[1])
return false;
return true;
}
const int stacksize = 64;
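// Iterative BVH traversal with a fixed-size per-thread stack (stacksize must exceed the tree
// depth). The "tid <= tnodes[...].cj" guards skip subtrees whose leaf index range ends before the
// current face, which appears intended to report each unordered face pair from only one side.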
__global__ void detect_collision(BVH bvh)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= bvh.nface)
return;
Face face_i = bvh.faces[tid];
auto tnodes = bvh.tnodes;
Box box_i(bvh, face_i);
int stack[stacksize];
int ptr = 0;
stack[ptr++] = bvh.root;
while (ptr != 0) {
int id = stack[--ptr];
if (tnodes[id].isleaf) {
if (detect_face_colli(tid, tnodes[id].leafid, bvh)) {
int colid = atomicAdd(&(bvh.collis[0].i), 1);
if (colid + 5 < bvh.nface)
bvh.collis[colid + 1] = Collision(bvh.faces[tid].id, bvh.faces[tnodes[id].leafid].id);
}
} else {
int lc = tnodes[id].lchild;
int rc = tnodes[id].rchild;
if (tid <= tnodes[lc].cj && detect_box_colli(box_i, tnodes[lc].bbox)) {
stack[ptr++] = lc;
}
if (tid <= tnodes[rc].cj && detect_box_colli(box_i, tnodes[rc].bbox)) {
stack[ptr++] = rc;
}
}
}
} | f97566606f475f8f8e3f452b1abfdafc33aa37cb.cu |
#include "bvh.cuh"
#include "tri_contact.cuh"
__host__ __device__ Box::Box(const BVH& bvh, const Face& face)
{
Box box(bvh.nodes[face.nid[0]]);
box.update(bvh.nodes[face.nid[1]]);
box.update(bvh.nodes[face.nid[2]]);
*this = box;
}
void Box::update(const Box& box_j)
{
x[0] = min(x[0], box_j.x[0]);
y[0] = min(y[0], box_j.y[0]);
z[0] = min(z[0], box_j.z[0]);
x[1] = max(x[1], box_j.x[1]);
y[1] = max(y[1], box_j.y[1]);
z[1] = max(z[1], box_j.z[1]);
}
void Box::update(const Node& node_j)
{
x[0] = min(x[0], node_j.x);
y[0] = min(y[0], node_j.y);
z[0] = min(z[0], node_j.z);
x[1] = max(x[1], node_j.x);
y[1] = max(y[1], node_j.y);
z[1] = max(z[1], node_j.z);
}
const int bitlimit = 40;
__device__ ulong morton3D(const Node& node, const Box& mbox)
{
Node boxnode(mbox.x[1] - mbox.x[0], mbox.y[1] - mbox.y[0], mbox.z[1] - mbox.z[0]);
Node newnode(node.x - mbox.x[0], node.y - mbox.y[0], node.z - mbox.z[0]);
ulong mtcd = 0;
for (int i = 0; i < bitlimit; i++) {
int ax = 0;
for (int j = 1; j < 3; j++) {
if (boxnode.c[j] > boxnode.c[ax])
ax = j;
}
boxnode.c[ax] /= 2;
mtcd = mtcd << 1;
if (newnode.c[ax] > boxnode.c[ax]) {
newnode.c[ax] -= boxnode.c[ax];
mtcd = mtcd | 1;
}
}
return mtcd;
}
__device__ Node get_barycentric_coords(const BVH& bvh, const Face& face)
{
Node node(0.0, 0.0, 0.0);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
const Node& foo_node = bvh.nodes[face.nid[j]];
node.c[i] += foo_node.c[i];
}
node.c[i] /= 3;
}
return node;
}
// Morton code
// https://developer.nvidia.com/blog/thinking-parallel-part-iii-tree-construction-gpu/
__global__ void morton_code(BVH bvh, Box mbox)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= bvh.nface)
return;
Node cnode = get_barycentric_coords(bvh, bvh.faces[tid]);
bvh.mtcode[tid] = morton3D(cnode, mbox);
}
__device__ inline int pfx(ulong a, ulong b)
{
return __clzll(a ^ b);
}
__device__ inline int sign(int a)
{
return a > 0 ? 1 : (a < 0 ? -1 : 0);
}
__device__ inline bool inside(int a, int b, int c)
{
return a >= b && a <= c;
}
// Magical parallel tree building
// https://developer.nvidia.com/blog/parallelforall/wp-content/uploads/2012/11/karras2012hpg_paper.pdf
__global__ void generate_tree(BVH bvh)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= bvh.nface - 1)
return;
auto mtcode = bvh.mtcode;
int d = tid == 0 ? 1 : sign(pfx(mtcode[tid], mtcode[tid + 1]) - pfx(mtcode[tid], mtcode[tid - 1]));
int minpfx = tid == 0 ? -1 : pfx(mtcode[tid], mtcode[tid - d]);
int lmax = 2, ci, cj;
ci = cj = tid;
while (inside(ci + d * lmax, 0, bvh.nface - 1) && pfx(mtcode[ci], mtcode[ci + d * lmax]) > minpfx)
lmax *= 2;
lmax /= 2;
while (lmax > 0) {
if (inside(cj + d * lmax, 0, bvh.nface - 1) && pfx(mtcode[ci], mtcode[cj + d * lmax]) > minpfx)
cj += d * lmax;
lmax /= 2;
}
minpfx = pfx(mtcode[ci], mtcode[cj]);
int ck, L, R, mid;
L = ci, R = cj + d;
while (1 < d * (R - L)) {
mid = (L + R) / 2;
if (pfx(mtcode[ci], mtcode[mid]) > minpfx)
L = mid;
else
R = mid;
}
ck = L;
int offset = bvh.nface;
ck += min(d, 0);
auto tnodes = bvh.tnodes;
tnodes[offset + tid].isleaf = false;
tnodes[offset + tid].ci = min(ci, cj);
tnodes[offset + tid].cj = max(ci, cj);
if (min(ci, cj) == ck) {
BVHnode tnode(true, ck, -1, -1, offset + tid);
tnode.bbox = Box(bvh, bvh.faces[ck]);
tnodes[ck] = tnode;
tnodes[offset + tid].lchild = ck;
} else {
tnodes[offset + tid].lchild = offset + ck;
tnodes[offset + ck].parent = offset + tid;
}
if (max(ci, cj) == ck + 1) {
BVHnode tnode(true, ck + 1, -1, -1, offset + tid);
tnode.bbox = Box(bvh, bvh.faces[ck + 1]);
tnodes[ck + 1] = tnode;
tnodes[offset + tid].rchild = ck + 1;
} else {
tnodes[offset + tid].rchild = offset + ck + 1;
tnodes[offset + ck + 1].parent = offset + tid;
}
if (tid == 0)
tnodes[offset + tid].parent = -1;
// bounding box calculation
// leafid = 0 means that node has not been visited yet
tnodes[offset + tid].leafid = 0;
}
__device__ inline void atomic_exch_tnode(Box &boxA, Box &boxB)
{
    // Warning!!!! This only works on machines where sizeof(double) equals sizeof(unsigned long long int),
    // because there is no atomic operation implemented for 'double'.
atomicExch((unsigned long long int*)&(boxA.x[0]), *((unsigned long long int*)&boxB.x[0]));
atomicExch((unsigned long long int*)&(boxA.y[0]), *((unsigned long long int*)&boxB.y[0]));
atomicExch((unsigned long long int*)&(boxA.z[0]), *((unsigned long long int*)&boxB.z[0]));
atomicExch((unsigned long long int*)&(boxA.x[1]), *((unsigned long long int*)&boxB.x[1]));
atomicExch((unsigned long long int*)&(boxA.y[1]), *((unsigned long long int*)&boxB.y[1]));
atomicExch((unsigned long long int*)&(boxA.z[1]), *((unsigned long long int*)&boxB.z[1]));
}
__global__ void get_tree_bbox(BVH bvh)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= bvh.nface)
return;
// begin with leaf node
BVHnode* tnodes = bvh.tnodes;
int pa = tnodes[tid].parent;
Box box = tnodes[tid].bbox;
while (pa >= 0) {
int flag = atomicExch(&(tnodes[pa].leafid), 1);
if (flag == 0)
break;
int lc = tnodes[pa].lchild;
int rc = tnodes[pa].rchild;
box.update(tnodes[lc].bbox);
box.update(tnodes[rc].bbox);
// Use atomic to ensure data is written to global memory, not cache
atomic_exch_tnode(tnodes[pa].bbox, box);
pa = tnodes[pa].parent;
}
}
__device__ bool detect_face_colli(int a, int b, const BVH& bvh)
{
Face& face_a = bvh.faces[a];
Face& face_b = bvh.faces[b];
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++)
if (face_a.nid[i] == face_b.nid[j])
return false;
vec3f v1(bvh.nodes[face_a.nid[0]].c);
vec3f v2(bvh.nodes[face_a.nid[1]].c);
vec3f v3(bvh.nodes[face_a.nid[2]].c);
vec3f v4(bvh.nodes[face_b.nid[0]].c);
vec3f v5(bvh.nodes[face_b.nid[1]].c);
vec3f v6(bvh.nodes[face_b.nid[2]].c);
return tri_contact(v1, v2, v3, v4, v5, v6);
}
__device__ bool detect_box_colli(const Box& box_i, const Box& box_j)
{
if (box_i.x[1] < box_j.x[0] || box_i.y[1] < box_j.y[0] || box_i.z[1] < box_j.z[0])
return false;
    if (box_i.x[0] > box_j.x[1] || box_i.y[0] > box_j.y[1] || box_i.z[0] > box_j.z[1])
return false;
return true;
}
const int stacksize = 64;
__global__ void detect_collision(BVH bvh)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= bvh.nface)
return;
Face face_i = bvh.faces[tid];
auto tnodes = bvh.tnodes;
Box box_i(bvh, face_i);
int stack[stacksize];
int ptr = 0;
stack[ptr++] = bvh.root;
while (ptr != 0) {
int id = stack[--ptr];
if (tnodes[id].isleaf) {
if (detect_face_colli(tid, tnodes[id].leafid, bvh)) {
int colid = atomicAdd(&(bvh.collis[0].i), 1);
if (colid + 5 < bvh.nface)
bvh.collis[colid + 1] = Collision(bvh.faces[tid].id, bvh.faces[tnodes[id].leafid].id);
}
} else {
int lc = tnodes[id].lchild;
int rc = tnodes[id].rchild;
if (tid <= tnodes[lc].cj && detect_box_colli(box_i, tnodes[lc].bbox)) {
stack[ptr++] = lc;
}
if (tid <= tnodes[rc].cj && detect_box_colli(box_i, tnodes[rc].bbox)) {
stack[ptr++] = rc;
}
}
}
} |
df6fd75b62831ff77b8e87f5c742b96e636a35e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathReduce.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathReduce.cu"
#include <THH/THHGenerateCharType.h>
| df6fd75b62831ff77b8e87f5c742b96e636a35e0.cu | #include "../THCTensorMathReduce.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMathReduce.cu"
#include <THC/THCGenerateCharType.h>
|
269695f5bc2755d59e52032aa76243d5a09d9e51.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <blas.hpp>
#include <arith.hpp>
#include <cast.hpp>
#include <common/err_common.hpp>
#include <common/half.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <cublas.hpp>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <err_cuda.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <reduce.hpp>
#include <tile.hpp>
#include <transpose.hpp>
#include <types.hpp>
#include <cassert>
#include <functional>
#include <stdexcept>
#include <string>
#include <vector>
using common::half;
using common::kernel_type;
using std::is_same;
using std::vector;
namespace cuda {
hipblasOperation_t toCblasTranspose(af_mat_prop opt) {
hipblasOperation_t out = HIPBLAS_OP_N;
switch (opt) {
case AF_MAT_NONE: out = HIPBLAS_OP_N; break;
case AF_MAT_TRANS: out = HIPBLAS_OP_T; break;
case AF_MAT_CTRANS: out = HIPBLAS_OP_C; break;
default: AF_ERROR("INVALID af_mat_prop", AF_ERR_ARG);
}
return out;
}
template<typename T>
using gemm_func_def = std::function<hipblasStatus_t(
hipblasHandle_t, hipblasOperation_t, hipblasOperation_t, int, int, int,
const T *, const T *, int, const T *, int, const T *, T *, int)>;
template<typename T>
using gemmBatched_func_def = std::function<hipblasStatus_t(
hipblasHandle_t, hipblasOperation_t, hipblasOperation_t, int, int, int,
const T *, const T **, int, const T **, int, const T *, T **, int, int)>;
template<typename T>
using trsm_func_def = std::function<hipblasStatus_t(
hipblasHandle_t, hipblasSideMode_t, hipblasFillMode_t, hipblasOperation_t,
hipblasDiagType_t, int, int, const T *, const T *, int, T *, int)>;
#define BLAS_FUNC_DEF(FUNC) \
template<typename T> \
FUNC##_func_def<T> FUNC##_func();
#define BLAS_FUNC(FUNC, TYPE, PREFIX) \
template<> \
FUNC##_func_def<TYPE> FUNC##_func<TYPE>() { \
return &cublas##PREFIX##FUNC; \
}
BLAS_FUNC_DEF(gemm)
BLAS_FUNC(gemm, float, S)
BLAS_FUNC(gemm, cfloat, C)
BLAS_FUNC(gemm, double, D)
BLAS_FUNC(gemm, cdouble, Z)
BLAS_FUNC(gemm, __half, H)
BLAS_FUNC_DEF(gemmBatched)
BLAS_FUNC(gemmBatched, float, S)
BLAS_FUNC(gemmBatched, cfloat, C)
BLAS_FUNC(gemmBatched, double, D)
BLAS_FUNC(gemmBatched, cdouble, Z)
BLAS_FUNC(gemmBatched, __half, H)
BLAS_FUNC_DEF(trsm)
BLAS_FUNC(trsm, float, S)
BLAS_FUNC(trsm, cfloat, C)
BLAS_FUNC(trsm, double, D)
BLAS_FUNC(trsm, cdouble, Z)
#undef BLAS_FUNC
#undef BLAS_FUNC_DEF
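// For reference, BLAS_FUNC(gemm, float, S) above expands to roughly:
//
//   template<>
//   gemm_func_def<float> gemm_func<float>() { return &cublasSgemm; }
//
// i.e. each *_func<T>() accessor returns a pointer to the type-appropriate BLAS entry point, and
// the dispatch code below calls through it.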
template<typename T, bool conjugate>
struct dot_func_def_t {
typedef hipblasStatus_t (*dot_func_def)(hipblasHandle_t, int, const T *, int,
const T *, int, T *);
};
#define BLAS_FUNC_DEF(FUNC) \
template<typename T, bool conjugate> \
typename FUNC##_func_def_t<T, conjugate>::FUNC##_func_def FUNC##_func();
#define BLAS_FUNC(FUNC, TYPE, CONJUGATE, PREFIX) \
template<> \
typename FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def \
FUNC##_func<TYPE, CONJUGATE>() { \
return (FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def) & \
cublas##PREFIX##FUNC; \
}
BLAS_FUNC_DEF(dot)
BLAS_FUNC(dot, float, true, S)
BLAS_FUNC(dot, double, true, D)
BLAS_FUNC(dot, float, false, S)
BLAS_FUNC(dot, double, false, D)
#undef BLAS_FUNC
#define BLAS_FUNC(FUNC, TYPE, CONJUGATE, PREFIX, SUFFIX) \
template<> \
typename FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def \
FUNC##_func<TYPE, CONJUGATE>() { \
return (FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def) & \
cublas##PREFIX##FUNC##SUFFIX; \
}
BLAS_FUNC_DEF(dot)
BLAS_FUNC(dot, cfloat, true, C, c)
BLAS_FUNC(dot, cdouble, true, Z, c)
BLAS_FUNC(dot, cfloat, false, C, u)
BLAS_FUNC(dot, cdouble, false, Z, u)
#undef BLAS_FUNC
#undef BLAS_FUNC_DEF
template<typename T>
hipDataType getType();
template<>
hipDataType getType<float>() {
return HIP_R_32F;
}
template<>
hipDataType getType<cfloat>() {
return HIP_C_32F;
}
template<>
hipDataType getType<double>() {
return HIP_R_64F;
}
template<>
hipDataType getType<cdouble>() {
return HIP_C_64F;
}
template<>
hipDataType getType<half>() {
return HIP_R_16F;
}
template<typename T>
hipDataType getComputeType() {
return getType<T>();
}
template<>
hipDataType getComputeType<half>() {
auto dev = getDeviceProp(getActiveDeviceId());
hipDataType algo = getType<half>();
    // There is probably a bug in the NVIDIA CUDA docs and/or drivers: according to
    // https://docs.nvidia.com/cuda/cublas/index.html#cublas-GemmEx, computeType
    // could be 32F even if the A/B inputs are 16F. But compute capability 6.1 GPUs (for
    // example GTX10X0) don't seem to be capable of computing at f32 when the
    // inputs are f16: the results are inf when trying to do so, yet hipblasGemmEx still
    // returns OK. For now this override stays commented out; the drawback is just that
    // f16 computation on these GPUs is very slow:
    //
    // if (dev.major == 6 && dev.minor == 1) { algo = HIP_R_32F; }
return algo;
}
template<typename T>
hipblasGemmAlgo_t selectGEMMAlgorithm() {
auto dev = getDeviceProp(getActiveDeviceId());
hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
return algo;
}
template<>
hipblasGemmAlgo_t selectGEMMAlgorithm<common::half>() {
auto dev = getDeviceProp(getActiveDeviceId());
hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
if (dev.major >= 7) { algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; }
return algo;
}
template<>
hipblasGemmAlgo_t selectGEMMAlgorithm<__half>() {
return selectGEMMAlgorithm<common::half>();
}
template<typename T>
hipblasStatus_t gemmDispatch(BlasHandle handle, hipblasOperation_t lOpts,
hipblasOperation_t rOpts, int M, int N, int K,
const T *alpha, const Array<T> &lhs, dim_t lStride,
const Array<T> &rhs, dim_t rStride, const T *beta,
Array<T> &out, dim_t oleading) {
auto prop = getDeviceProp(getActiveDeviceId());
if (prop.major > 3) {
return hipblasGemmEx(
blasHandle(), lOpts, rOpts, M, N, K, alpha, lhs.get(), getType<T>(),
lStride, rhs.get(), getType<T>(), rStride, beta, out.get(),
getType<T>(), out.strides()[1],
getComputeType<T>(), // Compute type
// NOTE: When using the CUBLAS_GEMM_DEFAULT_TENSOR_OP algorithm
// for the cublasGemm*Ex functions, the performance of the
// fp32 numbers seem to increase dramatically. Their numerical
            // accuracy is also different compared to regular gemm functions.
// The HIPBLAS_GEMM_DEFAULT algorithm selection does not experience
// this change. Does this imply that the TENSOR_OP function
// performs the computation in fp16 bit even when the compute
// type is HIP_R_32F?
selectGEMMAlgorithm<T>());
} else {
using Nt = typename common::kernel_type<T>::native;
return gemm_func<Nt>()(blasHandle(), lOpts, rOpts, M, N, K, (Nt *)alpha,
(Nt *)lhs.get(), lStride, (Nt *)rhs.get(),
rStride, (Nt *)beta, (Nt *)out.get(), oleading);
}
}
template<typename T>
hipblasStatus_t gemmBatchedDispatch(BlasHandle handle, hipblasOperation_t lOpts,
hipblasOperation_t rOpts, int M, int N, int K,
const T *alpha, const T **lptrs,
int lStrides, const T **rptrs, int rStrides,
const T *beta, T **optrs, int oStrides,
int batchSize) {
auto prop = getDeviceProp(getActiveDeviceId());
if (prop.major > 3) {
return hipblasGemmBatchedEx(
blasHandle(), lOpts, rOpts, M, N, K, alpha, (const void **)lptrs,
getType<T>(), lStrides, (const void **)rptrs, getType<T>(),
rStrides, beta, (void **)optrs, getType<T>(), oStrides, batchSize,
getComputeType<T>(), // compute type
// NOTE: When using the CUBLAS_GEMM_DEFAULT_TENSOR_OP algorithm
// for the cublasGemm*Ex functions, the performance of the
// fp32 numbers seem to increase dramatically. Their numerical
            // accuracy is also different compared to regular gemm functions.
// The HIPBLAS_GEMM_DEFAULT algorithm selection does not experience
// this change. Does this imply that the TENSOR_OP function
// performs the computation in fp16 bit even when the compute
// type is HIP_R_32F?
selectGEMMAlgorithm<T>());
} else {
using Nt = typename common::kernel_type<T>::native;
return gemmBatched_func<Nt>()(
blasHandle(), lOpts, rOpts, M, N, K, (const Nt *)alpha,
(const Nt **)lptrs, lStrides, (const Nt **)rptrs, rStrides,
(const Nt *)beta, (Nt **)optrs, oStrides, batchSize);
}
}
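// Both dispatch helpers above select between two paths: on devices whose compute capability major
// version is greater than 3 they call the *GemmEx entry point with explicit data/compute types and
// the algorithm chosen by selectGEMMAlgorithm, otherwise they fall back to the typed
// function-pointer table, where kernel_type<T>::native presumably maps af's half onto the
// library's __half type.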
template<typename T>
void gemm(Array<T> &out, af_mat_prop optLhs, af_mat_prop optRhs, const T *alpha,
const Array<T> &lhs, const Array<T> &rhs, const T *beta) {
const hipblasOperation_t lOpts = toCblasTranspose(optLhs);
const hipblasOperation_t rOpts = toCblasTranspose(optRhs);
const int aRowDim = (lOpts == HIPBLAS_OP_N) ? 0 : 1;
const int aColDim = (lOpts == HIPBLAS_OP_N) ? 1 : 0;
const int bColDim = (rOpts == HIPBLAS_OP_N) ? 1 : 0;
const dim4 lDims = lhs.dims();
const dim4 rDims = rhs.dims();
const int M = lDims[aRowDim];
const int N = rDims[bColDim];
const int K = lDims[aColDim];
const dim4 oDims = out.dims();
dim4 lStrides = lhs.strides();
dim4 rStrides = rhs.strides();
dim4 oStrides = out.strides();
if (oDims.ndims() <= 2) {
CUBLAS_CHECK(gemmDispatch<T>(blasHandle(), lOpts, rOpts, M, N, K, alpha,
lhs, lStrides[1], rhs, rStrides[1], beta,
out, oStrides[1]));
} else {
int batchSize = oDims[2] * oDims[3];
vector<const T *> lptrs(batchSize);
vector<const T *> rptrs(batchSize);
vector<T *> optrs(batchSize);
bool is_l_d2_batched = oDims[2] == lDims[2];
bool is_l_d3_batched = oDims[3] == lDims[3];
bool is_r_d2_batched = oDims[2] == rDims[2];
bool is_r_d3_batched = oDims[3] == rDims[3];
const T *lptr = lhs.get();
const T *rptr = rhs.get();
T *optr = out.get();
for (int n = 0; n < batchSize; n++) {
int w = n / oDims[2];
int z = n - w * oDims[2];
int loff = z * (is_l_d2_batched * lStrides[2]) +
w * (is_l_d3_batched * lStrides[3]);
int roff = z * (is_r_d2_batched * rStrides[2]) +
w * (is_r_d3_batched * rStrides[3]);
lptrs[n] = lptr + loff;
rptrs[n] = rptr + roff;
optrs[n] = optr + z * oStrides[2] + w * oStrides[3];
}
size_t bytes = batchSize * sizeof(T **);
auto d_lptrs = memAlloc<uchar>(bytes);
auto d_rptrs = memAlloc<uchar>(bytes);
auto d_optrs = memAlloc<uchar>(bytes);
CUDA_CHECK(hipMemcpyAsync(d_lptrs.get(), lptrs.data(), bytes,
hipMemcpyHostToDevice, getActiveStream()));
CUDA_CHECK(hipMemcpyAsync(d_rptrs.get(), rptrs.data(), bytes,
hipMemcpyHostToDevice, getActiveStream()));
CUDA_CHECK(hipMemcpyAsync(d_optrs.get(), optrs.data(), bytes,
hipMemcpyHostToDevice, getActiveStream()));
        // Synchronize here, before the gemm call, so that we only wait for the
        // pointer-array copies to finish and not for the gemm computation
        // itself, even though placing it afterwards would read more naturally.
CUDA_CHECK(hipStreamSynchronize(getActiveStream()));
using Nt = typename common::kernel_type<T>::native;
CUBLAS_CHECK(gemmBatchedDispatch(
blasHandle(), lOpts, rOpts, M, N, K, alpha,
(const T **)d_lptrs.get(), lStrides[1], (const T **)d_rptrs.get(),
rStrides[1], beta, (T **)d_optrs.get(), oStrides[1], batchSize));
}
}
template<typename T>
Array<T> dot(const Array<T> &lhs, const Array<T> &rhs, af_mat_prop optLhs,
af_mat_prop optRhs) {
auto lhs_ = (optLhs == AF_MAT_NONE ? lhs : conj<T>(lhs));
auto rhs_ = (optRhs == AF_MAT_NONE ? rhs : conj<T>(rhs));
auto temp = arithOp<T, af_mul_t>(lhs_, rhs_, lhs_.dims());
return reduce<af_add_t, T, T>(temp, 0, false, 0);
}
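// Note: despite the dot_func table defined earlier, dot() above is evaluated on the device as an
// element-wise multiply followed by an add-reduction; the optLhs/optRhs flags only control whether
// each operand is conjugated first.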
template<typename T>
void trsm(const Array<T> &lhs, Array<T> &rhs, af_mat_prop trans, bool is_upper,
bool is_left, bool is_unit) {
// dim4 lDims = lhs.dims();
dim4 rDims = rhs.dims();
int M = rDims[0];
int N = rDims[1];
T alpha = scalar<T>(1);
dim4 lStrides = lhs.strides();
dim4 rStrides = rhs.strides();
CUBLAS_CHECK(trsm_func<T>()(
blasHandle(), is_left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT,
is_upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER,
toCblasTranspose(trans),
is_unit ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT, M, N, &alpha,
lhs.get(), lStrides[1], rhs.get(), rStrides[1]));
}
#define INSTANTIATE_GEMM(TYPE) \
template void gemm<TYPE>(Array<TYPE> & out, af_mat_prop optLhs, \
af_mat_prop optRhs, const TYPE *alpha, \
const Array<TYPE> &lhs, const Array<TYPE> &rhs, \
const TYPE *beta);
INSTANTIATE_GEMM(float)
INSTANTIATE_GEMM(cfloat)
INSTANTIATE_GEMM(double)
INSTANTIATE_GEMM(cdouble)
INSTANTIATE_GEMM(half)
#define INSTANTIATE_DOT(TYPE) \
template Array<TYPE> dot<TYPE>(const Array<TYPE> &lhs, \
const Array<TYPE> &rhs, af_mat_prop optLhs, \
af_mat_prop optRhs);
INSTANTIATE_DOT(float)
INSTANTIATE_DOT(double)
INSTANTIATE_DOT(cfloat)
INSTANTIATE_DOT(cdouble)
INSTANTIATE_DOT(half)
#define INSTANTIATE_TRSM(TYPE) \
template void trsm<TYPE>(const Array<TYPE> &lhs, Array<TYPE> &rhs, \
af_mat_prop trans, bool is_upper, bool is_left, \
bool is_unit);
INSTANTIATE_TRSM(float)
INSTANTIATE_TRSM(cfloat)
INSTANTIATE_TRSM(double)
INSTANTIATE_TRSM(cdouble)
} // namespace cuda
| 269695f5bc2755d59e52032aa76243d5a09d9e51.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <blas.hpp>
#include <arith.hpp>
#include <cast.hpp>
#include <common/err_common.hpp>
#include <common/half.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <cublas.hpp>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <err_cuda.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <reduce.hpp>
#include <tile.hpp>
#include <transpose.hpp>
#include <types.hpp>
#include <cassert>
#include <functional>
#include <stdexcept>
#include <string>
#include <vector>
using common::half;
using common::kernel_type;
using std::is_same;
using std::vector;
namespace cuda {
cublasOperation_t toCblasTranspose(af_mat_prop opt) {
cublasOperation_t out = CUBLAS_OP_N;
switch (opt) {
case AF_MAT_NONE: out = CUBLAS_OP_N; break;
case AF_MAT_TRANS: out = CUBLAS_OP_T; break;
case AF_MAT_CTRANS: out = CUBLAS_OP_C; break;
default: AF_ERROR("INVALID af_mat_prop", AF_ERR_ARG);
}
return out;
}
template<typename T>
using gemm_func_def = std::function<cublasStatus_t(
cublasHandle_t, cublasOperation_t, cublasOperation_t, int, int, int,
const T *, const T *, int, const T *, int, const T *, T *, int)>;
template<typename T>
using gemmBatched_func_def = std::function<cublasStatus_t(
cublasHandle_t, cublasOperation_t, cublasOperation_t, int, int, int,
const T *, const T **, int, const T **, int, const T *, T **, int, int)>;
template<typename T>
using trsm_func_def = std::function<cublasStatus_t(
cublasHandle_t, cublasSideMode_t, cublasFillMode_t, cublasOperation_t,
cublasDiagType_t, int, int, const T *, const T *, int, T *, int)>;
#define BLAS_FUNC_DEF(FUNC) \
template<typename T> \
FUNC##_func_def<T> FUNC##_func();
#define BLAS_FUNC(FUNC, TYPE, PREFIX) \
template<> \
FUNC##_func_def<TYPE> FUNC##_func<TYPE>() { \
return &cublas##PREFIX##FUNC; \
}
BLAS_FUNC_DEF(gemm)
BLAS_FUNC(gemm, float, S)
BLAS_FUNC(gemm, cfloat, C)
BLAS_FUNC(gemm, double, D)
BLAS_FUNC(gemm, cdouble, Z)
BLAS_FUNC(gemm, __half, H)
BLAS_FUNC_DEF(gemmBatched)
BLAS_FUNC(gemmBatched, float, S)
BLAS_FUNC(gemmBatched, cfloat, C)
BLAS_FUNC(gemmBatched, double, D)
BLAS_FUNC(gemmBatched, cdouble, Z)
BLAS_FUNC(gemmBatched, __half, H)
BLAS_FUNC_DEF(trsm)
BLAS_FUNC(trsm, float, S)
BLAS_FUNC(trsm, cfloat, C)
BLAS_FUNC(trsm, double, D)
BLAS_FUNC(trsm, cdouble, Z)
#undef BLAS_FUNC
#undef BLAS_FUNC_DEF
template<typename T, bool conjugate>
struct dot_func_def_t {
typedef cublasStatus_t (*dot_func_def)(cublasHandle_t, int, const T *, int,
const T *, int, T *);
};
#define BLAS_FUNC_DEF(FUNC) \
template<typename T, bool conjugate> \
typename FUNC##_func_def_t<T, conjugate>::FUNC##_func_def FUNC##_func();
#define BLAS_FUNC(FUNC, TYPE, CONJUGATE, PREFIX) \
template<> \
typename FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def \
FUNC##_func<TYPE, CONJUGATE>() { \
return (FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def) & \
cublas##PREFIX##FUNC; \
}
BLAS_FUNC_DEF(dot)
BLAS_FUNC(dot, float, true, S)
BLAS_FUNC(dot, double, true, D)
BLAS_FUNC(dot, float, false, S)
BLAS_FUNC(dot, double, false, D)
#undef BLAS_FUNC
#define BLAS_FUNC(FUNC, TYPE, CONJUGATE, PREFIX, SUFFIX) \
template<> \
typename FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def \
FUNC##_func<TYPE, CONJUGATE>() { \
return (FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def) & \
cublas##PREFIX##FUNC##SUFFIX; \
}
BLAS_FUNC_DEF(dot)
BLAS_FUNC(dot, cfloat, true, C, c)
BLAS_FUNC(dot, cdouble, true, Z, c)
BLAS_FUNC(dot, cfloat, false, C, u)
BLAS_FUNC(dot, cdouble, false, Z, u)
#undef BLAS_FUNC
#undef BLAS_FUNC_DEF
template<typename T>
cudaDataType_t getType();
template<>
cudaDataType_t getType<float>() {
return CUDA_R_32F;
}
template<>
cudaDataType_t getType<cfloat>() {
return CUDA_C_32F;
}
template<>
cudaDataType_t getType<double>() {
return CUDA_R_64F;
}
template<>
cudaDataType_t getType<cdouble>() {
return CUDA_C_64F;
}
template<>
cudaDataType_t getType<half>() {
return CUDA_R_16F;
}
template<typename T>
cudaDataType_t getComputeType() {
return getType<T>();
}
template<>
cudaDataType_t getComputeType<half>() {
auto dev = getDeviceProp(getActiveDeviceId());
cudaDataType_t algo = getType<half>();
    // There is probably a bug in the NVIDIA CUDA docs and/or drivers: according to
    // https://docs.nvidia.com/cuda/cublas/index.html#cublas-GemmEx, computeType
    // could be 32F even if the A/B inputs are 16F. But compute capability 6.1 GPUs (for
    // example GTX10X0) don't seem to be capable of computing at f32 when the
    // inputs are f16: the results are inf when trying to do so, yet cublasGemmEx still
    // returns OK. For now this override stays commented out; the drawback is just that
    // f16 computation on these GPUs is very slow:
    //
    // if (dev.major == 6 && dev.minor == 1) { algo = CUDA_R_32F; }
return algo;
}
template<typename T>
cublasGemmAlgo_t selectGEMMAlgorithm() {
auto dev = getDeviceProp(getActiveDeviceId());
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
return algo;
}
template<>
cublasGemmAlgo_t selectGEMMAlgorithm<common::half>() {
auto dev = getDeviceProp(getActiveDeviceId());
cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
if (dev.major >= 7) { algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; }
return algo;
}
template<>
cublasGemmAlgo_t selectGEMMAlgorithm<__half>() {
return selectGEMMAlgorithm<common::half>();
}
template<typename T>
cublasStatus_t gemmDispatch(BlasHandle handle, cublasOperation_t lOpts,
cublasOperation_t rOpts, int M, int N, int K,
const T *alpha, const Array<T> &lhs, dim_t lStride,
const Array<T> &rhs, dim_t rStride, const T *beta,
Array<T> &out, dim_t oleading) {
auto prop = getDeviceProp(getActiveDeviceId());
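    // cublasGemmEx is only supported on devices of compute capability 5.0 and
    // newer (hence the major > 3 check); older devices fall back to the
    // type-specific cublas<t>gemm wrappers selected above.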
if (prop.major > 3) {
return cublasGemmEx(
blasHandle(), lOpts, rOpts, M, N, K, alpha, lhs.get(), getType<T>(),
lStride, rhs.get(), getType<T>(), rStride, beta, out.get(),
getType<T>(), out.strides()[1],
getComputeType<T>(), // Compute type
            // NOTE: When using the CUBLAS_GEMM_DEFAULT_TENSOR_OP algorithm
            // for the cublasGemm*Ex functions, the performance of fp32
            // inputs seems to increase dramatically, and their numerical
            // accuracy differs from the regular gemm functions. The
            // CUBLAS_GEMM_DEFAULT algorithm selection does not show this
            // change. Does this imply that the TENSOR_OP path performs the
            // computation in fp16 even when the compute type is CUDA_R_32F?
selectGEMMAlgorithm<T>());
} else {
using Nt = typename common::kernel_type<T>::native;
return gemm_func<Nt>()(blasHandle(), lOpts, rOpts, M, N, K, (Nt *)alpha,
(Nt *)lhs.get(), lStride, (Nt *)rhs.get(),
rStride, (Nt *)beta, (Nt *)out.get(), oleading);
}
}
template<typename T>
cublasStatus_t gemmBatchedDispatch(BlasHandle handle, cublasOperation_t lOpts,
cublasOperation_t rOpts, int M, int N, int K,
const T *alpha, const T **lptrs,
int lStrides, const T **rptrs, int rStrides,
const T *beta, T **optrs, int oStrides,
int batchSize) {
auto prop = getDeviceProp(getActiveDeviceId());
if (prop.major > 3) {
return cublasGemmBatchedEx(
blasHandle(), lOpts, rOpts, M, N, K, alpha, (const void **)lptrs,
getType<T>(), lStrides, (const void **)rptrs, getType<T>(),
rStrides, beta, (void **)optrs, getType<T>(), oStrides, batchSize,
getComputeType<T>(), // compute type
            // NOTE: When using the CUBLAS_GEMM_DEFAULT_TENSOR_OP algorithm
            // for the cublasGemm*Ex functions, the performance of fp32
            // inputs seems to increase dramatically, and their numerical
            // accuracy differs from the regular gemm functions. The
            // CUBLAS_GEMM_DEFAULT algorithm selection does not show this
            // change. Does this imply that the TENSOR_OP path performs the
            // computation in fp16 even when the compute type is CUDA_R_32F?
selectGEMMAlgorithm<T>());
} else {
using Nt = typename common::kernel_type<T>::native;
return gemmBatched_func<Nt>()(
blasHandle(), lOpts, rOpts, M, N, K, (const Nt *)alpha,
(const Nt **)lptrs, lStrides, (const Nt **)rptrs, rStrides,
(const Nt *)beta, (Nt **)optrs, oStrides, batchSize);
}
}
template<typename T>
void gemm(Array<T> &out, af_mat_prop optLhs, af_mat_prop optRhs, const T *alpha,
const Array<T> &lhs, const Array<T> &rhs, const T *beta) {
const cublasOperation_t lOpts = toCblasTranspose(optLhs);
const cublasOperation_t rOpts = toCblasTranspose(optRhs);
const int aRowDim = (lOpts == CUBLAS_OP_N) ? 0 : 1;
const int aColDim = (lOpts == CUBLAS_OP_N) ? 1 : 0;
const int bColDim = (rOpts == CUBLAS_OP_N) ? 1 : 0;
const dim4 lDims = lhs.dims();
const dim4 rDims = rhs.dims();
const int M = lDims[aRowDim];
const int N = rDims[bColDim];
const int K = lDims[aColDim];
const dim4 oDims = out.dims();
dim4 lStrides = lhs.strides();
dim4 rStrides = rhs.strides();
dim4 oStrides = out.strides();
if (oDims.ndims() <= 2) {
CUBLAS_CHECK(gemmDispatch<T>(blasHandle(), lOpts, rOpts, M, N, K, alpha,
lhs, lStrides[1], rhs, rStrides[1], beta,
out, oStrides[1]));
} else {
int batchSize = oDims[2] * oDims[3];
vector<const T *> lptrs(batchSize);
vector<const T *> rptrs(batchSize);
vector<T *> optrs(batchSize);
bool is_l_d2_batched = oDims[2] == lDims[2];
bool is_l_d3_batched = oDims[3] == lDims[3];
bool is_r_d2_batched = oDims[2] == rDims[2];
bool is_r_d3_batched = oDims[3] == rDims[3];
const T *lptr = lhs.get();
const T *rptr = rhs.get();
T *optr = out.get();
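        // Each flat batch index n maps to a (z, w) pair over output dims 2 and
        // 3; the is_*_batched flags zero out the corresponding lhs/rhs offset
        // when that input has a singleton batch dimension, so the smaller
        // operand is broadcast across the batch.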
for (int n = 0; n < batchSize; n++) {
int w = n / oDims[2];
int z = n - w * oDims[2];
int loff = z * (is_l_d2_batched * lStrides[2]) +
w * (is_l_d3_batched * lStrides[3]);
int roff = z * (is_r_d2_batched * rStrides[2]) +
w * (is_r_d3_batched * rStrides[3]);
lptrs[n] = lptr + loff;
rptrs[n] = rptr + roff;
optrs[n] = optr + z * oStrides[2] + w * oStrides[3];
}
size_t bytes = batchSize * sizeof(T **);
auto d_lptrs = memAlloc<uchar>(bytes);
auto d_rptrs = memAlloc<uchar>(bytes);
auto d_optrs = memAlloc<uchar>(bytes);
CUDA_CHECK(cudaMemcpyAsync(d_lptrs.get(), lptrs.data(), bytes,
cudaMemcpyHostToDevice, getActiveStream()));
CUDA_CHECK(cudaMemcpyAsync(d_rptrs.get(), rptrs.data(), bytes,
cudaMemcpyHostToDevice, getActiveStream()));
CUDA_CHECK(cudaMemcpyAsync(d_optrs.get(), optrs.data(), bytes,
cudaMemcpyHostToDevice, getActiveStream()));
        // Synchronize here, after the asynchronous pointer-array copies but
        // before the gemm call, so that we only wait for these small transfers
        // rather than for the gemm computation itself.
CUDA_CHECK(cudaStreamSynchronize(getActiveStream()));
using Nt = typename common::kernel_type<T>::native;
CUBLAS_CHECK(gemmBatchedDispatch(
blasHandle(), lOpts, rOpts, M, N, K, alpha,
(const T **)d_lptrs.get(), lStrides[1], (const T **)d_rptrs.get(),
rStrides[1], beta, (T **)d_optrs.get(), oStrides[1], batchSize));
}
}
template<typename T>
Array<T> dot(const Array<T> &lhs, const Array<T> &rhs, af_mat_prop optLhs,
af_mat_prop optRhs) {
auto lhs_ = (optLhs == AF_MAT_NONE ? lhs : conj<T>(lhs));
auto rhs_ = (optRhs == AF_MAT_NONE ? rhs : conj<T>(rhs));
auto temp = arithOp<T, af_mul_t>(lhs_, rhs_, lhs_.dims());
return reduce<af_add_t, T, T>(temp, 0, false, 0);
}
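// Note: dot() above is implemented as an element-wise multiply followed by an
// add-reduction rather than through the cublas*dot function pointers defined
// earlier; any requested conjugation is applied explicitly via conj<T>().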
template<typename T>
void trsm(const Array<T> &lhs, Array<T> &rhs, af_mat_prop trans, bool is_upper,
bool is_left, bool is_unit) {
// dim4 lDims = lhs.dims();
dim4 rDims = rhs.dims();
int M = rDims[0];
int N = rDims[1];
T alpha = scalar<T>(1);
dim4 lStrides = lhs.strides();
dim4 rStrides = rhs.strides();
CUBLAS_CHECK(trsm_func<T>()(
blasHandle(), is_left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT,
is_upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER,
toCblasTranspose(trans),
is_unit ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT, M, N, &alpha,
lhs.get(), lStrides[1], rhs.get(), rStrides[1]));
}
#define INSTANTIATE_GEMM(TYPE) \
template void gemm<TYPE>(Array<TYPE> & out, af_mat_prop optLhs, \
af_mat_prop optRhs, const TYPE *alpha, \
const Array<TYPE> &lhs, const Array<TYPE> &rhs, \
const TYPE *beta);
INSTANTIATE_GEMM(float)
INSTANTIATE_GEMM(cfloat)
INSTANTIATE_GEMM(double)
INSTANTIATE_GEMM(cdouble)
INSTANTIATE_GEMM(half)
#define INSTANTIATE_DOT(TYPE) \
template Array<TYPE> dot<TYPE>(const Array<TYPE> &lhs, \
const Array<TYPE> &rhs, af_mat_prop optLhs, \
af_mat_prop optRhs);
INSTANTIATE_DOT(float)
INSTANTIATE_DOT(double)
INSTANTIATE_DOT(cfloat)
INSTANTIATE_DOT(cdouble)
INSTANTIATE_DOT(half)
#define INSTANTIATE_TRSM(TYPE) \
template void trsm<TYPE>(const Array<TYPE> &lhs, Array<TYPE> &rhs, \
af_mat_prop trans, bool is_upper, bool is_left, \
bool is_unit);
INSTANTIATE_TRSM(float)
INSTANTIATE_TRSM(cfloat)
INSTANTIATE_TRSM(double)
INSTANTIATE_TRSM(cdouble)
} // namespace cuda
|
88d8cc59e49a9b6f1b92faa0452597f175350158.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/util/math_functions.hpp"
#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Tile(const int nthreads, const Dtype* bottom_data,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
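    // Map each top element back to its source bottom element: d is the offset
    // inside one tile, b the position along the tiled axis, n the outer index;
    // the tile coordinate itself is discarded, so all num_tiles copies read the
    // same bottom value.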
const int d = index % tile_size;
const int b = (index / tile_size / num_tiles) % bottom_tile_axis;
const int n = index / tile_size / num_tiles / bottom_tile_axis;
const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;
top_data[index] = bottom_data[bottom_index];
}
}
template <typename Dtype>
void TileLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int nthreads = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(Tile<Dtype>, dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, inner_dim_,
      tiles_, bottom_tile_axis, top_data);
}
template <typename Dtype>
__global__ void TileBackward(const int nthreads, const Dtype* top_diff,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
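    // Each bottom element accumulates the gradient from its num_tiles copies in
    // the top blob; top_index starts at the first copy and advances by one full
    // tiled extent (bottom_tile_axis * tile_size) per iteration.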
const int d = index % tile_size;
const int b = (index / tile_size) % bottom_tile_axis;
const int n = index / tile_size / bottom_tile_axis;
bottom_diff[index] = 0;
int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;
for (int t = 0; t < num_tiles; ++t) {
bottom_diff[index] += top_diff[top_index];
top_index += bottom_tile_axis * tile_size;
}
}
}
template <typename Dtype>
void TileLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int tile_size = inner_dim_ / bottom_tile_axis;
const int nthreads = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(TileBackward<Dtype>, dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_diff, tile_size,
      tiles_, bottom_tile_axis, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(TileLayer);
} // namespace caffe
| 88d8cc59e49a9b6f1b92faa0452597f175350158.cu | #include <vector>
#include "caffe/util/math_functions.hpp"
#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Tile(const int nthreads, const Dtype* bottom_data,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size / num_tiles) % bottom_tile_axis;
const int n = index / tile_size / num_tiles / bottom_tile_axis;
const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;
top_data[index] = bottom_data[bottom_index];
}
}
template <typename Dtype>
void TileLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int nthreads = top[0]->count();
Tile<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom_data, inner_dim_, tiles_, bottom_tile_axis, top_data);
}
template <typename Dtype>
__global__ void TileBackward(const int nthreads, const Dtype* top_diff,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size) % bottom_tile_axis;
const int n = index / tile_size / bottom_tile_axis;
bottom_diff[index] = 0;
int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;
for (int t = 0; t < num_tiles; ++t) {
bottom_diff[index] += top_diff[top_index];
top_index += bottom_tile_axis * tile_size;
}
}
}
template <typename Dtype>
void TileLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int tile_size = inner_dim_ / bottom_tile_axis;
const int nthreads = bottom[0]->count();
TileBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top_diff, tile_size, tiles_, bottom_tile_axis, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(TileLayer);
} // namespace caffe
|
858f7001af141fc9c09b3660436b2d7d533d442e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgerbt.cu, normal z -> c, Mon Jun 25 18:24:11 2018
@author Adrien REMY
*/
#include "magma_internal.h"
#include "cgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/***************************************************************************//**
Purpose
-------
CPRBT_MVT compute B = UTB to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du COMPLEX array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db COMPLEX array, dimension (n)
The n vector db computed by CGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_cprbt_mtv(
magma_int_t n,
magmaFloatComplex *du, magmaFloatComplex *db,
magma_queue_t queue)
{
/*
*/
magma_int_t threads = block_length;
magma_int_t grid = magma_ceildiv( n, 4*block_length );
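    // Depth-2 butterfly: first apply the two half-size (n/2) transposed
    // butterflies stored at offsets n and n+n/2 of du, then the full-size one
    // stored at offset 0.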
hipLaunchKernelGGL(( magmablas_capply_transpose_vector_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n, db, 0);
hipLaunchKernelGGL(( magmablas_capply_transpose_vector_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n+n/2, db, n/2);
threads = block_length;
grid = magma_ceildiv( n, 2*block_length );
hipLaunchKernelGGL(( magmablas_capply_transpose_vector_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, du, 0, db, 0);
}
/***************************************************************************//**
Purpose
-------
CPRBT_MV compute B = VB to obtain the non randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db COMPLEX array, dimension (n)
The n vector db computed by CGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv COMPLEX array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_cprbt_mv(
magma_int_t n,
magmaFloatComplex *dv, magmaFloatComplex *db,
magma_queue_t queue)
{
magma_int_t threads = block_length;
magma_int_t grid = magma_ceildiv( n, 2*block_length );
hipLaunchKernelGGL(( magmablas_capply_vector_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dv, 0, db, 0);
threads = block_length;
grid = magma_ceildiv( n, 4*block_length );
hipLaunchKernelGGL(( magmablas_capply_vector_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n, db, 0);
hipLaunchKernelGGL(( magmablas_capply_vector_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n+n/2, db, n/2);
}
/***************************************************************************//**
Purpose
-------
CPRBT randomize a square general matrix using partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA COMPLEX array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du COMPLEX array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv COMPLEX array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_cprbt(
magma_int_t n,
magmaFloatComplex *dA, magma_int_t ldda,
magmaFloatComplex *du, magmaFloatComplex *dv,
magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid( magma_ceildiv( n, 4*block_height ),
magma_ceildiv( n, 4*block_width ));
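    // First level: apply the half-size butterflies independently to the four
    // n/2 x n/2 quadrants of dA; the second level (grid2 below) applies the
    // full-size butterfly to the whole matrix, using negative offsets because
    // du/dv were advanced by ldda above.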
hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA, 0, ldda, du, 0, dv, 0);
hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA, ldda*n/2, ldda, du, 0, dv, n/2);
hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA, n/2, ldda, du, n/2, dv, 0);
hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2( magma_ceildiv( n, 2*block_height ),
magma_ceildiv( n, 2*block_width ));
hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel), dim3(grid2), dim3(threads2), 0, queue->cuda_stream() , n, dA, 0, ldda, du, -ldda, dv, -ldda);
}
| 858f7001af141fc9c09b3660436b2d7d533d442e.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgerbt.cu, normal z -> c, Mon Jun 25 18:24:11 2018
@author Adrien REMY
*/
#include "magma_internal.h"
#include "cgerbt.h"
#define block_height 32
#define block_width 4
#define block_length 256
#define NB 64
/***************************************************************************//**
Purpose
-------
CPRBT_MVT compute B = UTB to randomize B
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in]
du COMPLEX array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in,out]
db COMPLEX array, dimension (n)
The n vector db computed by CGESV_NOPIV_GPU
On exit db = du*db
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_cprbt_mtv(
magma_int_t n,
magmaFloatComplex *du, magmaFloatComplex *db,
magma_queue_t queue)
{
/*
*/
magma_int_t threads = block_length;
magma_int_t grid = magma_ceildiv( n, 4*block_length );
magmablas_capply_transpose_vector_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n, db, 0);
magmablas_capply_transpose_vector_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n+n/2, db, n/2);
threads = block_length;
grid = magma_ceildiv( n, 2*block_length );
magmablas_capply_transpose_vector_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n, du, 0, db, 0);
}
/***************************************************************************//**
Purpose
-------
CPRBT_MV compute B = VB to obtain the non randomized solution
Arguments
---------
@param[in]
n INTEGER
The number of values of db. n >= 0.
@param[in,out]
db COMPLEX array, dimension (n)
The n vector db computed by CGESV_NOPIV_GPU
On exit db = dv*db
@param[in]
dv COMPLEX array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_cprbt_mv(
magma_int_t n,
magmaFloatComplex *dv, magmaFloatComplex *db,
magma_queue_t queue)
{
magma_int_t threads = block_length;
magma_int_t grid = magma_ceildiv( n, 2*block_length );
magmablas_capply_vector_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n, dv, 0, db, 0);
threads = block_length;
grid = magma_ceildiv( n, 4*block_length );
magmablas_capply_vector_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n, db, 0);
magmablas_capply_vector_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n+n/2, db, n/2);
}
/***************************************************************************//**
Purpose
-------
CPRBT randomize a square general matrix using partial randomized transformation
Arguments
---------
@param[in]
n INTEGER
The number of columns and rows of the matrix dA. n >= 0.
@param[in,out]
dA COMPLEX array, dimension (n,ldda)
The n-by-n matrix dA
On exit dA = duT*dA*d_V
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDA >= max(1,n).
@param[in]
du COMPLEX array, dimension (n,2)
The 2*n vector representing the random butterfly matrix U
@param[in]
dv COMPLEX array, dimension (n,2)
The 2*n vector representing the random butterfly matrix V
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C" void
magmablas_cprbt(
magma_int_t n,
magmaFloatComplex *dA, magma_int_t ldda,
magmaFloatComplex *du, magmaFloatComplex *dv,
magma_queue_t queue)
{
du += ldda;
dv += ldda;
dim3 threads(block_height, block_width);
dim3 grid( magma_ceildiv( n, 4*block_height ),
magma_ceildiv( n, 4*block_width ));
magmablas_celementary_multiplication_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA, 0, ldda, du, 0, dv, 0);
magmablas_celementary_multiplication_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA, ldda*n/2, ldda, du, 0, dv, n/2);
magmablas_celementary_multiplication_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA, n/2, ldda, du, n/2, dv, 0);
magmablas_celementary_multiplication_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA, ldda*n/2+n/2, ldda, du, n/2, dv, n/2);
dim3 threads2(block_height, block_width);
dim3 grid2( magma_ceildiv( n, 2*block_height ),
magma_ceildiv( n, 2*block_width ));
magmablas_celementary_multiplication_kernel<<< grid2, threads2, 0, queue->cuda_stream() >>>(n, dA, 0, ldda, du, -ldda, dv, -ldda);
}
|
3e5da670d1ea252a929ab244acea32b93afe1854.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <string>
#define epsilon 0.000001
using namespace std;
void fillArrays(int* data, int* data2, int* backup, int size);
void copyArray(int* data, int* backup, int size);
void unoptimizedSort(int* randomNumbers, int size, FILE* file);
void testIfSorted(int* randomNumbers);
bool gpuSortingTest(int* data);
void cudaSort(int* &data, int size, int blocks, int tasksPerThread, FILE* file);
__global__ void oddEvenKernel(int* data, int size, int tasksPerThread, int index);
int main()
{
srand(time(NULL));
hipError_t cudaStatus = hipSuccess;
FILE* file = fopen("data.txt", "w+");
int* data, *data2, *backup;
fprintf(file, "ODD-EVEN SORTING DATA\n---------------------------------------------\n");
// Sorting, size 100, 1000, 10000, 100000
for (int size = 100; size < 100001; size *= 10)
{
std::cout << "Working on size: " << size << std::endl;
fprintf(file, "DATA SIZE: %i \n", size);
// Allocate memory for arrays
data = (int*)malloc((size + 1) * sizeof(int));
backup = (int*)malloc((size + 1) * sizeof(int));
data2 = (int*)malloc((size + 1) * sizeof(int));
// Fill arrays
fillArrays(data, data2, backup, size);
// CPU SORTING
unoptimizedSort(data, size, file);
// GPU SORTING
for (int tasksPerThread = 1; tasksPerThread < 9; tasksPerThread *= 2)
{
int threads = (size / 2) / tasksPerThread;
int blocks = (threads - 1) / 1024 + 1; // 1024 to match current GPU limitations
// Call GPU helper function
cudaSort(data2, size, blocks, tasksPerThread, file);
// Reset array
copyArray(data2, backup, size);
}
std::cout << std::endl << "------------------------------------------------------------------" << std::endl;
// Release array memory
free(data);
free(data2);
free(backup);
}
// hipDeviceReset
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceReset function in main failed.");
return 1;
}
fclose(file);
std::cout << "FINISHED! Press any key to exit." << std::endl;
std::cin.get();
return 0;
}
void fillArrays(int* data, int* data2, int* backup, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = data2[i] = backup[i] = rand() % size + 1;
}
}
void copyArray(int* data, int* backup, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = backup[i];
}
}
void unoptimizedSort(int* randomNumbers, int size, FILE* file)
{
clock_t t;
t = clock();
bool sorted = false;
// Loop until sorted
while (!sorted)
{
int index = 0;
sorted = true;
// Sort even indices
for (index; index < size - 2; index += 2)
{
if (randomNumbers[index] > randomNumbers[index + 1])
{
int temp = randomNumbers[index];
randomNumbers[index] = randomNumbers[index + 1];
randomNumbers[index + 1] = temp;
sorted = false;
}
}
//std::cout << "CPU - Finished sorting even indices" << std::endl;
// Sort odd indices
index = 1;
for (index; index < size - 2; index += 2)
{
if (randomNumbers[index] > randomNumbers[index + 1])
{
int temp = randomNumbers[index];
randomNumbers[index] = randomNumbers[index + 1];
randomNumbers[index + 1] = temp;
sorted = false;
}
}
//std::cout << "CPU - Finished sorting odd indices" << std::endl;
}
std::cout << "CPU - Finished Sorting" << std::endl;
t = clock() - t;
std::cout << "CPU Odd-Even Sorting took: " << ((float)t)/CLOCKS_PER_SEC << " seconds.";
fprintf(file, "\nCPU: %.4f \n", ((float)t) / CLOCKS_PER_SEC);
testIfSorted(randomNumbers);
}
void testIfSorted(int* randomNumbers)
{
// Loop through array and check if sorted
bool sorted = true;
for (int i = 1; i < sizeof(randomNumbers); ++i)
{
if (randomNumbers[i] < randomNumbers[i - 1])
sorted = false;
}
if (sorted)
cout << endl << "The array is sorted!" << endl;
else
cout << endl << "The array is not sorted..." << endl;
}
bool gpuSortingTest(int* data)
{
// Loop through array and check if sorted
bool sorted = true;
for (int i = 1; i < sizeof(data); ++i)
{
if (data[i] < data[i - 1])
sorted = false;
}
return sorted;
}
// CUDA allocating function
void cudaSort(int* &data, int size, int blocks, int tasksPerThread, FILE* file)
{
int* devArray = 0;
clock_t t;
t = clock();
// Allocate array to GPU
hipError_t cudaStatus = hipMalloc((void**)&devArray, (size + 1) * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed for array\n");
return;
}
// Copy array data to GPU
cudaStatus = hipMemcpy(devArray, data, (size + 1) * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed for CPU -> GPU\n");
return;
}
// Create temp array to retrieve array back from GPU
int* tempArray = (int*)malloc((size + 1) * sizeof(int));
// Call kernel function
bool sorted = false;
while (!sorted)
{
// Odd Even sort
// Every thread starts with even indices
for (int i = 0; i < size / 2 + 1; ++i)
{
oddEvenKernel << <blocks, 1024 >> > (devArray, size, tasksPerThread, 0);
oddEvenKernel << <blocks, 1024 >> > (devArray, size, tasksPerThread, 1);
}
// Retrieve sorted array back from GPU
cudaStatus = hipMemcpy((void*)tempArray, (void*)devArray, (size + 1) * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed for GPU -> CPU\n");
return;
}
sorted = gpuSortingTest(tempArray);
if (!sorted)
{
cout << "Should be sorted, but here we go again bois" << endl;
}
}
data = tempArray;
testIfSorted(data);
t = clock() - t;
std::cout << "GPU sorting took: " << ((float)t) / CLOCKS_PER_SEC << " seconds.)" << endl;
fprintf(file, "GPU: %.4f \n", ((float)t) / CLOCKS_PER_SEC);
hipFree(devArray);
hipFree(tempArray);
}
// GPU Kernel function
__global__ void oddEvenKernel(int* data, int size, int tasksPerThread, int index)
{
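	// 'index' selects the phase: 0 compares the pairs (0,1), (2,3), ... and 1
	// compares (1,2), (3,4), ...; each thread handles tasksPerThread such
	// pairs starting at 'start'.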
int start = (threadIdx.x + blockIdx.x * blockDim.x) * 2 * tasksPerThread + index;
// Sorting
for (int element = 0; element < tasksPerThread * 2; element+=2)
{
int currentIndex = start + element;
if (currentIndex >= size)
return;
if (data[currentIndex] > data[currentIndex + 1])
{
int temp = data[currentIndex];
data[currentIndex] = data[currentIndex + 1];
data[currentIndex + 1] = temp;
}
}
} | 3e5da670d1ea252a929ab244acea32b93afe1854.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <string>
#define epsilon 0.000001
using namespace std;
void fillArrays(int* data, int* data2, int* backup, int size);
void copyArray(int* data, int* backup, int size);
void unoptimizedSort(int* randomNumbers, int size, FILE* file);
void testIfSorted(int* randomNumbers);
bool gpuSortingTest(int* data);
void cudaSort(int* &data, int size, int blocks, int tasksPerThread, FILE* file);
__global__ void oddEvenKernel(int* data, int size, int tasksPerThread, int index);
int main()
{
srand(time(NULL));
cudaError_t cudaStatus = cudaSuccess;
FILE* file = fopen("data.txt", "w+");
int* data, *data2, *backup;
fprintf(file, "ODD-EVEN SORTING DATA\n---------------------------------------------\n");
// Sorting, size 100, 1000, 10000, 100000
for (int size = 100; size < 100001; size *= 10)
{
std::cout << "Working on size: " << size << std::endl;
fprintf(file, "DATA SIZE: %i \n", size);
// Allocate memory for arrays
data = (int*)malloc((size + 1) * sizeof(int));
backup = (int*)malloc((size + 1) * sizeof(int));
data2 = (int*)malloc((size + 1) * sizeof(int));
// Fill arrays
fillArrays(data, data2, backup, size);
// CPU SORTING
unoptimizedSort(data, size, file);
// GPU SORTING
for (int tasksPerThread = 1; tasksPerThread < 9; tasksPerThread *= 2)
{
int threads = (size / 2) / tasksPerThread;
int blocks = (threads - 1) / 1024 + 1; // 1024 to match current GPU limitations
// Call GPU helper function
cudaSort(data2, size, blocks, tasksPerThread, file);
// Reset array
copyArray(data2, backup, size);
}
std::cout << std::endl << "------------------------------------------------------------------" << std::endl;
// Release array memory
free(data);
free(data2);
free(backup);
}
// cudaDeviceReset
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaDeviceReset function in main failed.");
return 1;
}
fclose(file);
std::cout << "FINISHED! Press any key to exit." << std::endl;
std::cin.get();
return 0;
}
void fillArrays(int* data, int* data2, int* backup, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = data2[i] = backup[i] = rand() % size + 1;
}
}
void copyArray(int* data, int* backup, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = backup[i];
}
}
void unoptimizedSort(int* randomNumbers, int size, FILE* file)
{
clock_t t;
t = clock();
bool sorted = false;
// Loop until sorted
while (!sorted)
{
int index = 0;
sorted = true;
// Sort even indices
for (index; index < size - 2; index += 2)
{
if (randomNumbers[index] > randomNumbers[index + 1])
{
int temp = randomNumbers[index];
randomNumbers[index] = randomNumbers[index + 1];
randomNumbers[index + 1] = temp;
sorted = false;
}
}
//std::cout << "CPU - Finished sorting even indices" << std::endl;
// Sort odd indices
index = 1;
for (index; index < size - 2; index += 2)
{
if (randomNumbers[index] > randomNumbers[index + 1])
{
int temp = randomNumbers[index];
randomNumbers[index] = randomNumbers[index + 1];
randomNumbers[index + 1] = temp;
sorted = false;
}
}
//std::cout << "CPU - Finished sorting odd indices" << std::endl;
}
std::cout << "CPU - Finished Sorting" << std::endl;
t = clock() - t;
std::cout << "CPU Odd-Even Sorting took: " << ((float)t)/CLOCKS_PER_SEC << " seconds.";
fprintf(file, "\nCPU: %.4f \n", ((float)t) / CLOCKS_PER_SEC);
testIfSorted(randomNumbers);
}
void testIfSorted(int* randomNumbers)
{
// Loop through array and check if sorted
bool sorted = true;
for (int i = 1; i < sizeof(randomNumbers); ++i)
{
if (randomNumbers[i] < randomNumbers[i - 1])
sorted = false;
}
if (sorted)
cout << endl << "The array is sorted!" << endl;
else
cout << endl << "The array is not sorted..." << endl;
}
bool gpuSortingTest(int* data)
{
// Loop through array and check if sorted
bool sorted = true;
for (int i = 1; i < sizeof(data); ++i)
{
if (data[i] < data[i - 1])
sorted = false;
}
return sorted;
}
// CUDA allocating function
void cudaSort(int* &data, int size, int blocks, int tasksPerThread, FILE* file)
{
int* devArray = 0;
clock_t t;
t = clock();
// Allocate array to GPU
cudaError_t cudaStatus = cudaMalloc((void**)&devArray, (size + 1) * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed for array\n");
return;
}
// Copy array data to GPU
cudaStatus = cudaMemcpy(devArray, data, (size + 1) * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed for CPU -> GPU\n");
return;
}
// Create temp array to retrieve array back from GPU
int* tempArray = (int*)malloc((size + 1) * sizeof(int));
// Call kernel function
bool sorted = false;
while (!sorted)
{
// Odd Even sort
// Every thread starts with even indices
for (int i = 0; i < size / 2 + 1; ++i)
{
oddEvenKernel << <blocks, 1024 >> > (devArray, size, tasksPerThread, 0);
oddEvenKernel << <blocks, 1024 >> > (devArray, size, tasksPerThread, 1);
}
// Retrieve sorted array back from GPU
cudaStatus = cudaMemcpy((void*)tempArray, (void*)devArray, (size + 1) * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed for GPU -> CPU\n");
return;
}
sorted = gpuSortingTest(tempArray);
if (!sorted)
{
cout << "Should be sorted, but here we go again bois" << endl;
}
}
data = tempArray;
testIfSorted(data);
t = clock() - t;
std::cout << "GPU sorting took: " << ((float)t) / CLOCKS_PER_SEC << " seconds.)" << endl;
fprintf(file, "GPU: %.4f \n", ((float)t) / CLOCKS_PER_SEC);
cudaFree(devArray);
cudaFree(tempArray);
}
// GPU Kernel function
__global__ void oddEvenKernel(int* data, int size, int tasksPerThread, int index)
{
int start = (threadIdx.x + blockIdx.x * blockDim.x) * 2 * tasksPerThread + index;
// Sorting
for (int element = 0; element < tasksPerThread * 2; element+=2)
{
int currentIndex = start + element;
if (currentIndex >= size)
return;
if (data[currentIndex] > data[currentIndex + 1])
{
int temp = data[currentIndex];
data[currentIndex] = data[currentIndex + 1];
data[currentIndex + 1] = temp;
}
}
} |
6ea08534895d5b58b773646ef9c8e149d69fc16f.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorSort.cuh>
#include <THH/THHTensor.hpp>
#include <THH/generic/THHTensorSort.hip>
#include <THH/THHGenerateCharType.h>
| 6ea08534895d5b58b773646ef9c8e149d69fc16f.cu | #include <THC/THCTensorSort.cuh>
#include <THC/THCTensor.hpp>
#include <THC/generic/THCTensorSort.cu>
#include <THC/THCGenerateCharType.h>
|
704bd1213b1ad1293339217937bbce3554216a1c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_fdivide.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
 hipFree(0);
 hipLaunchKernelGGL(vec_fdivide, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
 hipDeviceSynchronize();
 for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
 hipLaunchKernelGGL(vec_fdivide, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
 }
 auto start = steady_clock::now();
 for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
 hipLaunchKernelGGL(vec_fdivide, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
 }
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 704bd1213b1ad1293339217937bbce3554216a1c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_fdivide.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_fdivide<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_fdivide<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_fdivide<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
85af0003d518f605f9beed7c9ebcac374adfe7e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include "bbx.h"
#include "utils/cuda.cuh"
template<typename T>
__device__ inline T clamp(T x, T a, T b) {
return max(a, min(b, x));
}
template<typename T, typename index_t>
__global__ void mask_count_kernel(const at::PackedTensorAccessor<T, 2, at::RestrictPtrTraits, index_t> bbx,
const at::PackedTensorAccessor<T, 2, at::RestrictPtrTraits, index_t> int_mask,
at::PackedTensorAccessor<T, 1, at::RestrictPtrTraits, index_t> count) {
index_t num = bbx.size(0), height = int_mask.size(0), width = int_mask.size(1);
index_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < num) {
auto _bbx = bbx[n];
int i0 = clamp(static_cast<index_t>(_bbx[0]), index_t(0), height - 1),
j0 = clamp(static_cast<index_t>(_bbx[1]), index_t(0), width - 1),
i1 = clamp(static_cast<index_t>(_bbx[2]), index_t(0), height - 1),
j1 = clamp(static_cast<index_t>(_bbx[3]), index_t(0), width - 1);
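    // Standard summed-area-table (integral image) box query: the four corner
    // reads yield the mask count (sum of mask values) inside the clamped
    // bounding box.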
count[n] = int_mask[i1][j1] - int_mask[i0][j1] - int_mask[i1][j0] + int_mask[i0][j0];
}
}
at::Tensor mask_count_cuda(const at::Tensor& bbx, const at::Tensor& int_mask) {
// Get dimensions
auto num = bbx.size(0);
// Create output
auto count = at::zeros({num}, bbx.options());
// Run kernel
dim3 threads(getNumThreads(num));
dim3 blocks((num + threads.x - 1) / threads.x);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
AT_DISPATCH_FLOATING_TYPES(bbx.scalar_type(), "mask_count_cuda", ([&] {
if (at::cuda::detail::canUse32BitIndexMath(int_mask)) {
auto _bbx = bbx.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, int32_t>();
auto _int_mask = int_mask.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, int32_t>();
auto _count = count.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, int32_t>();
hipLaunchKernelGGL(( mask_count_kernel<scalar_t, int32_t>), dim3(blocks), dim3(threads), 0, stream, _bbx, _int_mask, _count);
} else {
auto _bbx = bbx.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, int64_t>();
auto _int_mask = int_mask.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, int64_t>();
auto _count = count.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, int64_t>();
hipLaunchKernelGGL(( mask_count_kernel<scalar_t, int64_t>), dim3(blocks), dim3(threads), 0, stream, _bbx, _int_mask, _count);
}
}));
return count;
} | 85af0003d518f605f9beed7c9ebcac374adfe7e0.cu | #include <ATen/ATen.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include "bbx.h"
#include "utils/cuda.cuh"
template<typename T>
__device__ inline T clamp(T x, T a, T b) {
return max(a, min(b, x));
}
template<typename T, typename index_t>
__global__ void mask_count_kernel(const at::PackedTensorAccessor<T, 2, at::RestrictPtrTraits, index_t> bbx,
const at::PackedTensorAccessor<T, 2, at::RestrictPtrTraits, index_t> int_mask,
at::PackedTensorAccessor<T, 1, at::RestrictPtrTraits, index_t> count) {
index_t num = bbx.size(0), height = int_mask.size(0), width = int_mask.size(1);
index_t n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < num) {
auto _bbx = bbx[n];
int i0 = clamp(static_cast<index_t>(_bbx[0]), index_t(0), height - 1),
j0 = clamp(static_cast<index_t>(_bbx[1]), index_t(0), width - 1),
i1 = clamp(static_cast<index_t>(_bbx[2]), index_t(0), height - 1),
j1 = clamp(static_cast<index_t>(_bbx[3]), index_t(0), width - 1);
count[n] = int_mask[i1][j1] - int_mask[i0][j1] - int_mask[i1][j0] + int_mask[i0][j0];
}
}
at::Tensor mask_count_cuda(const at::Tensor& bbx, const at::Tensor& int_mask) {
// Get dimensions
auto num = bbx.size(0);
// Create output
auto count = at::zeros({num}, bbx.options());
// Run kernel
dim3 threads(getNumThreads(num));
dim3 blocks((num + threads.x - 1) / threads.x);
auto stream = at::cuda::getCurrentCUDAStream().stream();
AT_DISPATCH_FLOATING_TYPES(bbx.scalar_type(), "mask_count_cuda", ([&] {
if (at::cuda::detail::canUse32BitIndexMath(int_mask)) {
auto _bbx = bbx.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, int32_t>();
auto _int_mask = int_mask.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, int32_t>();
auto _count = count.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, int32_t>();
mask_count_kernel<scalar_t, int32_t><<<blocks, threads, 0, stream>>>(_bbx, _int_mask, _count);
} else {
auto _bbx = bbx.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, int64_t>();
auto _int_mask = int_mask.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, int64_t>();
auto _count = count.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, int64_t>();
mask_count_kernel<scalar_t, int64_t><<<blocks, threads, 0, stream>>>(_bbx, _int_mask, _count);
}
}));
return count;
} |
b9c355e275b7ff556e7602743fa62676c0730a5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "G2PKernels.cuh"
#include <MnBase/Math/Matrix/MatrixKernels.cuh>
#include <System/CudaDevice/CudaDeviceUtils.cuh>
#include <Simulation/ConstitutiveModel/ConstitutiveModelKernels.cuh>
#include <cstdio>
namespace mn {
__global__ void G2P_MLS(const int numParticle,
const int *d_targetPages,
const int *d_virtualPageOffsets,
const int **smallest_nodes,
int *d_block_offsets,
int *d_cellids,
int *d_indices,
int *d_indexTrans,
T **d_sorted_positions,
T **d_sorted_velocities,
T **d_channels,
T *d_sorted_F,
T *d_B,
T *d_tmp,
T dt,
int **d_adjPage) {
__shared__ T buffer[3][8][8][8];
int pageid = d_targetPages[blockIdx.x] - 1; // from virtual to physical page
int cellid = d_block_offsets[pageid]; //
int relParid =
512 * (blockIdx.x - d_virtualPageOffsets[pageid]) + threadIdx.x;
int parid = cellid + relParid;
int block = threadIdx.x & 0x3f;
int ci = block >> 4;
int cj = (block & 0xc) >> 2;
int ck = block & 3;
block = threadIdx.x >> 6;
int bi = block >> 2;
int bj = (block & 2) >> 1;
int bk = block & 1;
int page_idx = block ? d_adjPage[block - 1][pageid] : pageid;
// vel
for (int v = 0; v < 3; ++v)
buffer[v][bi * 4 + ci][bj * 4 + cj][bk * 4 + ck] =
*((T *)((uint64_t)d_channels[1 + v] + (int)page_idx * 4096) +
(ci * 16 + cj * 4 + ck));
__syncthreads();
int smallest_node[3];
if (relParid < d_block_offsets[pageid + 1] - d_block_offsets[pageid]) {
cellid = d_cellids[parid] - 1;
T wOneD[3][3];
smallest_node[0] = smallest_nodes[0][cellid];
smallest_node[1] = smallest_nodes[1][cellid];
smallest_node[2] = smallest_nodes[2][cellid];
T xp[3];
xp[0] = d_sorted_positions[0][parid] - smallest_node[0] * dx;
xp[1] = d_sorted_positions[1][parid] - smallest_node[1] * dx;
xp[2] = d_sorted_positions[2][parid] - smallest_node[2] * dx;
for (int v = 0; v < 3; ++v) {
T d0 = xp[v] * one_over_dx;
T z = ((T)1.5 - d0);
wOneD[v][0] = (T)0.5 * z * z;
d0 = d0 - 1.0f;
wOneD[v][1] = (T)0.75 - d0 * d0;
z = (T)1.5 - (1.0f - d0);
wOneD[v][2] = (T)0.5 * z * z;
}
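    // wOneD now holds the three 1-D quadratic B-spline weights per axis; the 27
    // products computed below are the particle's interpolation weights over its
    // 3x3x3 grid-node neighbourhood.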
int c = 0;
float tmp[27];
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
tmp[c++] = wOneD[0][i] * wOneD[1][j] * wOneD[2][k];
}
}
}
for (int v = 0; v < 3; ++v)
smallest_node[v] = smallest_node[v] & 0x3;
T val[9];
for (int i = 0; i < 3; ++i)
val[i] = 0.f;
c = 0;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
// v_pic
val[0] += tmp[c] * buffer[0][smallest_node[0] + i]
[smallest_node[1] + j][smallest_node[2] + k];
val[1] += tmp[c] * buffer[1][smallest_node[0] + i]
[smallest_node[1] + j][smallest_node[2] + k];
val[2] += tmp[c++] *
buffer[2][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k];
}
}
}
d_tmp[parid] = val[0];
d_tmp[parid + numParticle] = val[1];
d_tmp[parid + numParticle * 2] = val[2];
d_sorted_positions[0][parid] += val[0] * dt;
d_sorted_positions[1][parid] += val[1] * dt;
d_sorted_positions[2][parid] += val[2] * dt;
for (int i = 0; i < 9; ++i)
val[i] = 0.f;
c = 0;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
// B
val[0] += tmp[c] *
buffer[0][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(i * dx - xp[0]);
val[1] += tmp[c] *
buffer[1][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(i * dx - xp[0]);
val[2] += tmp[c] *
buffer[2][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(i * dx - xp[0]);
val[3] += tmp[c] *
buffer[0][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(j * dx - xp[1]);
val[4] += tmp[c] *
buffer[1][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(j * dx - xp[1]);
val[5] += tmp[c] *
buffer[2][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(j * dx - xp[1]);
val[6] += tmp[c] *
buffer[0][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(k * dx - xp[2]);
val[7] += tmp[c] *
buffer[1][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(k * dx - xp[2]);
val[8] += tmp[c++] *
buffer[2][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(k * dx - xp[2]);
}
}
}
for (int i = 0; i < 9; ++i)
d_tmp[parid + (i + 3) * numParticle] = val[i];
for (int i = 0; i < 9; ++i)
val[i] = val[i] * dt * D_inverse;
val[0] += 1.f;
val[4] += 1.f;
val[8] += 1.f;
T F[9];
int parid_trans = d_indexTrans[parid];
for (int i = 0; i < 9; ++i)
F[i] = d_sorted_F[parid_trans + i * numParticle];
T result[9];
matrixMatrixMultiplication(&(val[0]), F, result);
for (int i = 0; i < 9; ++i)
d_tmp[parid + (i + 12) * numParticle] = result[i];
}
}
} // namespace mn
| b9c355e275b7ff556e7602743fa62676c0730a5f.cu | #include "G2PKernels.cuh"
#include <MnBase/Math/Matrix/MatrixKernels.cuh>
#include <System/CudaDevice/CudaDeviceUtils.cuh>
#include <Simulation/ConstitutiveModel/ConstitutiveModelKernels.cuh>
#include <cstdio>
namespace mn {
__global__ void G2P_MLS(const int numParticle,
const int *d_targetPages,
const int *d_virtualPageOffsets,
const int **smallest_nodes,
int *d_block_offsets,
int *d_cellids,
int *d_indices,
int *d_indexTrans,
T **d_sorted_positions,
T **d_sorted_velocities,
T **d_channels,
T *d_sorted_F,
T *d_B,
T *d_tmp,
T dt,
int **d_adjPage) {
__shared__ T buffer[3][8][8][8];
int pageid = d_targetPages[blockIdx.x] - 1; // from virtual to physical page
int cellid = d_block_offsets[pageid]; //
int relParid =
512 * (blockIdx.x - d_virtualPageOffsets[pageid]) + threadIdx.x;
int parid = cellid + relParid;
int block = threadIdx.x & 0x3f;
int ci = block >> 4;
int cj = (block & 0xc) >> 2;
int ck = block & 3;
block = threadIdx.x >> 6;
int bi = block >> 2;
int bj = (block & 2) >> 1;
int bk = block & 1;
int page_idx = block ? d_adjPage[block - 1][pageid] : pageid;
// vel
for (int v = 0; v < 3; ++v)
buffer[v][bi * 4 + ci][bj * 4 + cj][bk * 4 + ck] =
*((T *)((uint64_t)d_channels[1 + v] + (int)page_idx * 4096) +
(ci * 16 + cj * 4 + ck));
__syncthreads();
int smallest_node[3];
if (relParid < d_block_offsets[pageid + 1] - d_block_offsets[pageid]) {
cellid = d_cellids[parid] - 1;
T wOneD[3][3];
smallest_node[0] = smallest_nodes[0][cellid];
smallest_node[1] = smallest_nodes[1][cellid];
smallest_node[2] = smallest_nodes[2][cellid];
T xp[3];
xp[0] = d_sorted_positions[0][parid] - smallest_node[0] * dx;
xp[1] = d_sorted_positions[1][parid] - smallest_node[1] * dx;
xp[2] = d_sorted_positions[2][parid] - smallest_node[2] * dx;
for (int v = 0; v < 3; ++v) {
T d0 = xp[v] * one_over_dx;
T z = ((T)1.5 - d0);
wOneD[v][0] = (T)0.5 * z * z;
d0 = d0 - 1.0f;
wOneD[v][1] = (T)0.75 - d0 * d0;
z = (T)1.5 - (1.0f - d0);
wOneD[v][2] = (T)0.5 * z * z;
}
int c = 0;
float tmp[27];
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
tmp[c++] = wOneD[0][i] * wOneD[1][j] * wOneD[2][k];
}
}
}
for (int v = 0; v < 3; ++v)
smallest_node[v] = smallest_node[v] & 0x3;
T val[9];
for (int i = 0; i < 3; ++i)
val[i] = 0.f;
c = 0;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
// v_pic
val[0] += tmp[c] * buffer[0][smallest_node[0] + i]
[smallest_node[1] + j][smallest_node[2] + k];
val[1] += tmp[c] * buffer[1][smallest_node[0] + i]
[smallest_node[1] + j][smallest_node[2] + k];
val[2] += tmp[c++] *
buffer[2][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k];
}
}
}
d_tmp[parid] = val[0];
d_tmp[parid + numParticle] = val[1];
d_tmp[parid + numParticle * 2] = val[2];
d_sorted_positions[0][parid] += val[0] * dt;
d_sorted_positions[1][parid] += val[1] * dt;
d_sorted_positions[2][parid] += val[2] * dt;
for (int i = 0; i < 9; ++i)
val[i] = 0.f;
c = 0;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
// B
val[0] += tmp[c] *
buffer[0][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(i * dx - xp[0]);
val[1] += tmp[c] *
buffer[1][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(i * dx - xp[0]);
val[2] += tmp[c] *
buffer[2][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(i * dx - xp[0]);
val[3] += tmp[c] *
buffer[0][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(j * dx - xp[1]);
val[4] += tmp[c] *
buffer[1][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(j * dx - xp[1]);
val[5] += tmp[c] *
buffer[2][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(j * dx - xp[1]);
val[6] += tmp[c] *
buffer[0][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(k * dx - xp[2]);
val[7] += tmp[c] *
buffer[1][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(k * dx - xp[2]);
val[8] += tmp[c++] *
buffer[2][smallest_node[0] + i][smallest_node[1] + j]
[smallest_node[2] + k] *
(k * dx - xp[2]);
}
}
}
for (int i = 0; i < 9; ++i)
d_tmp[parid + (i + 3) * numParticle] = val[i];
for (int i = 0; i < 9; ++i)
val[i] = val[i] * dt * D_inverse;
val[0] += 1.f;
val[4] += 1.f;
val[8] += 1.f;
T F[9];
int parid_trans = d_indexTrans[parid];
for (int i = 0; i < 9; ++i)
F[i] = d_sorted_F[parid_trans + i * numParticle];
T result[9];
matrixMatrixMultiplication(&(val[0]), F, result);
for (int i = 0; i < 9; ++i)
d_tmp[parid + (i + 12) * numParticle] = result[i];
}
}
} // namespace mn
|
1c521193ff587f85d204ba72ee3606e57a91670a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
//1 prac 2
//2 prac 3
#define prac 2
#define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
hipEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(hipEventCreate(&cuda_timer_start));
CUDA_CALL(hipEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(hipEventDestroy(cuda_timer_start));
CUDA_CALL(hipEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
hipEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
hipEventRecord(cuda_timer_stop, CUDA_STREAM_0);
hipEventSynchronize(cuda_timer_stop);
hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
#if prac==1
typedef struct {
int width;
int height;
float* elements;
} Array;
#define MAX_N_ELEMENTS (1 << 20)
void generate_random_float_array(float* array, int n) {
int i;
for (i = 0; i < n; i++) {
array[i] = 3.1415926f * ((float)rand() / RAND_MAX);
}
}
void combine_two_arrays(float* x, float* y, float* z, int n) {
int i;
for (i = 0; i < n; i++) {
z[i] = 1.0f / (sin(x[i]) * cos(y[i]) + cos(x[i]) * sin(y[i]));
}
}
__global__ void CombineTwoArrraysKernel(Array A, Array B, Array C) {
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int id = gridDim.x * blockDim.x * row + col;
C.elements[id] = 1.0f / (sin(A.elements[id]) * cos(B.elements[id]) + cos(A.elements[id]) * sin(B.elements[id]));
}
hipError_t combine_two_arrays_GPU(const Array A, const Array B, Array C);
int BLOCK_SIZE = 16;
int main()
{
int n_elements;
srand((unsigned int)time(NULL));
n_elements = MAX_N_ELEMENTS;
Array A, B, C, G;
A.width = B.width = C.width = G.width = 1024;
A.height = B.height = C.height = G.height = MAX_N_ELEMENTS / 1024;
A.elements = (float*)malloc(sizeof(float) * MAX_N_ELEMENTS);
B.elements = (float*)malloc(sizeof(float) * MAX_N_ELEMENTS);
C.elements = (float*)malloc(sizeof(float) * MAX_N_ELEMENTS);
G.elements = (float*)malloc(sizeof(float) * MAX_N_ELEMENTS);
generate_random_float_array(A.elements, MAX_N_ELEMENTS);
generate_random_float_array(B.elements, MAX_N_ELEMENTS);
CHECK_TIME_START;
combine_two_arrays(A.elements, B.elements, C.elements, n_elements);
CHECK_TIME_END(compute_time);
printf("***GPU C[10] = %f/ Time taken = %.6fms\n", C.elements[10], compute_time);
hipError_t cudaStatus = combine_two_arrays_GPU(A, B, G);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "combine_two_arrays_GPU failed!");
return 1;
}
printf("***GPU G[10] = %f/ Time taken = %.6fms\n", G.elements[10], device_time);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
hipError_t combine_two_arrays_GPU(const Array A, const Array B, Array C) {
// Think about how to place the timer calls below so that the time is measured as accurately as possible.
CHECK_TIME_INIT_GPU()
CHECK_TIME_START_GPU()
CHECK_TIME_END_GPU(device_time)
CHECK_TIME_DEST_GPU()
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
} // the explicit error check above is equivalent to what the CUDA_CALL macro does
Array d_A, d_B, d_C;
size_t size;
d_A.width = A.width; d_A.height = A.height;
size = A.width * A.height * sizeof(float);
CUDA_CALL(hipMalloc(&d_A.elements, size))
CUDA_CALL(hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice))
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
CUDA_CALL(hipMalloc(&d_B.elements, size))
CUDA_CALL(hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice))
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
CUDA_CALL(hipMalloc(&d_C.elements, size))
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(A.width / dimBlock.x, A.height / dimBlock.y);
CombineTwoArrraysKernel << < dimGrid, dimBlock >> > (d_A, d_B, d_C);
CUDA_CALL(hipGetLastError())
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost))
Error:
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
return cudaStatus;
}
#endif
#if prac==2
int n;
#define BLOCK_SIZE 32
#define ELEMENT_SIZE (1<<10)
const int ELEM_PER_VECTOR = 32;
float(*pVecX), (*pVecY), (*pVecY_G);
float(*pMatA);
void init_MatVec(void)
{
srand((unsigned)time(NULL));
FILE* fp = fopen("gen.bin", "rb");
fread(&n, sizeof(int), 1, fp);
pVecX = new float[n * ELEM_PER_VECTOR];
pVecY = new float[n * ELEM_PER_VECTOR];
pVecY_G = new float[n * ELEM_PER_VECTOR];
pMatA = new float[ELEM_PER_VECTOR * ELEM_PER_VECTOR];
fread(pVecX, sizeof(float), n * ELEM_PER_VECTOR, fp);
fread(pMatA, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
fclose(fp);
}
void Mat_Vec_Multiply()
{
int vec_idx, i, j;
for (vec_idx = 0; vec_idx < ELEMENT_SIZE; vec_idx++) {
for (i = 0; i < ELEM_PER_VECTOR; i++) {
float sum = 0;
for (j = 0; j < ELEM_PER_VECTOR; j++) {
sum += pMatA[i * ELEM_PER_VECTOR + j] * pVecX[vec_idx * ELEM_PER_VECTOR + j];
}
pVecY[vec_idx * ELEM_PER_VECTOR + i] = sum;
}
}
}
__global__ void Mat_Vec_Multiply_Kernel(float* d_VecY, float* d_VecX, float* d_MatA, int Vec_Size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x; // global thread index: block blockIdx.x holds blockDim.x threads, and this is thread threadIdx.x within it
int i, j;
for (i = 0; i < ELEM_PER_VECTOR; i++) {
float sum = 0;
for (j = 0; j < ELEM_PER_VECTOR; j++) {
sum += d_MatA[i * ELEM_PER_VECTOR + j] * d_VecX[id * ELEM_PER_VECTOR + j];
}
d_VecY[id * ELEM_PER_VECTOR + i] = sum;
}
}
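// Editorial sketch (matches the launch in Mat_Vec_Multiply_GPU below, shown here for clarity):
// each thread computes one complete ELEM_PER_VECTOR-long output vector, so the host launches
// one thread per input vector:
//
//   dim3 dimBlock(BLOCK_SIZE, 1);      // 32 threads per block
//   dim3 dimGrid(n / dimBlock.x, 1);   // assumes n is a multiple of BLOCK_SIZE
//   Mat_Vec_Multiply_Kernel << < dimGrid, dimBlock >> > (d_VecY, d_VecX, d_MatA, ELEM_PER_VECTOR);
//
// Note that the Vec_Size parameter is currently unused inside the kernel body.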
void Mat_Vec_Multiply_GPU(float* p_VecX, float* p_MatA, float* p_VecY_G)
{
CHECK_TIME_INIT_GPU()
CHECK_TIME_START_GPU()
CHECK_TIME_END_GPU(device_time)
CHECK_TIME_DEST_GPU()
float* d_VecX;
float* d_MatA;
float* d_VecY_G;
size_t size = sizeof(float) * n * ELEM_PER_VECTOR;
CUDA_CALL(hipMalloc(&d_VecX, size))
CUDA_CALL(hipMemcpy(d_VecX, p_VecX, size, hipMemcpyHostToDevice));
size = sizeof(float) * ELEM_PER_VECTOR * ELEM_PER_VECTOR;
CUDA_CALL(hipMalloc(&d_MatA, size))
CUDA_CALL(hipMemcpy(d_MatA, p_MatA, size, hipMemcpyHostToDevice));
size = sizeof(float) * n * ELEM_PER_VECTOR;
CUDA_CALL(hipMalloc(&d_VecY_G, size))
dim3 dimBlock(BLOCK_SIZE, 1);
dim3 dimGrid(n / dimBlock.x, 1);
Mat_Vec_Multiply_Kernel << < dimGrid, dimBlock >> > (d_VecY_G, d_VecX, d_MatA, ELEM_PER_VECTOR);
CUDA_CALL(hipGetLastError())
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(p_VecY_G, d_VecY_G, size, hipMemcpyDeviceToHost))
hipFree(d_MatA);
hipFree(d_VecX);
hipFree(d_VecY_G);
}
void init_data(int size) {
srand((unsigned)time(NULL));
FILE* fp = fopen("gen.bin", "wb");
fwrite(&size, sizeof(int), 1, fp);
int i, j;
float x;
for (i = 0; i < size; i++) {
for (j = 0; j < ELEM_PER_VECTOR; j++) {
x = 2.0f * ((float)rand() / RAND_MAX) - 1.0f;
fwrite(&x, sizeof(float), 1, fp);
}
}
for (i = 0; i < ELEM_PER_VECTOR; i++) {
for (j = 0; j < ELEM_PER_VECTOR; j++) {
x = 2.0f * ((float)rand() / RAND_MAX) - 1.0f;
fwrite(&x, sizeof(float), 1, fp);
}
}
fclose(fp);
return;
}
int main()
{
init_data(ELEMENT_SIZE);
init_MatVec();
printf("n = %d file open ok.\n", n);
CHECK_TIME_START;
Mat_Vec_Multiply();
CHECK_TIME_END(compute_time);
printf("***CPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY[0 * ELEM_PER_VECTOR + 0], compute_time);
Mat_Vec_Multiply_GPU(pVecX, pMatA, pVecY_G);
printf("***GPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY_G[0 * ELEM_PER_VECTOR + 0], device_time);
int vec_idx, i;
for (i = 0; i < ELEMENT_SIZE * ELEM_PER_VECTOR; i++) {
if (fabs(pVecY[i] - pVecY_G[i]) > 0.001) {
printf("Kernel execution fail!!\n\n");
break;
}
}
}
#endif | 1c521193ff587f85d204ba72ee3606e57a91670a.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
// prac == 1: practice 2 (combine two arrays element-wise)
// prac == 2: practice 3 (batched matrix-vector multiply)
#define prac 2
#define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
cudaEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(cudaEventCreate(&cuda_timer_start));
CUDA_CALL(cudaEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(cudaEventDestroy(cuda_timer_start));
CUDA_CALL(cudaEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
cudaEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0);
cudaEventSynchronize(cuda_timer_stop);
cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
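//
// Editorial note (not part of the original exercise): the intended usage of the GPU timer
// macros defined above is to bracket the work being measured, e.g.
//
//   CHECK_TIME_INIT_GPU()
//   CHECK_TIME_START_GPU()
//   some_kernel << < grid, block >> > (...);   // hypothetical kernel launch to be timed
//   CHECK_TIME_END_GPU(device_time)            // records the stop event, synchronizes, returns ms
//   CHECK_TIME_DEST_GPU()
//
// The GPU wrapper functions below keep the START/END calls back-to-back, as in the original
// skeleton, so device_time does not actually cover the kernel launch there.
//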
#if prac==1
typedef struct {
int width;
int height;
float* elements;
} Array;
#define MAX_N_ELEMENTS (1 << 20)
void generate_random_float_array(float* array, int n) {
int i;
for (i = 0; i < n; i++) {
array[i] = 3.1415926f * ((float)rand() / RAND_MAX);
}
}
void combine_two_arrays(float* x, float* y, float* z, int n) {
int i;
for (i = 0; i < n; i++) {
z[i] = 1.0f / (sin(x[i]) * cos(y[i]) + cos(x[i]) * sin(y[i]));
}
}
__global__ void CombineTwoArrraysKernel(Array A, Array B, Array C) {
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
int id = gridDim.x * blockDim.x * row + col;
C.elements[id] = 1.0f / (sin(A.elements[id]) * cos(B.elements[id]) + cos(A.elements[id]) * sin(B.elements[id]));
}
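// Editorial note (not part of the original source): id is the row-major global index
// gridDim.x * blockDim.x * row + col. The kernel has no bounds check, so the launch in
// combine_two_arrays_GPU must cover the array exactly; that holds here because the
// 1024 x (MAX_N_ELEMENTS / 1024) layout is a multiple of the 16 x 16 block size.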
cudaError_t combine_two_arrays_GPU(const Array A, const Array B, Array C);
int BLOCK_SIZE = 16;
int main()
{
int n_elements;
srand((unsigned int)time(NULL));
n_elements = MAX_N_ELEMENTS;
Array A, B, C, G;
A.width = B.width = C.width = G.width = 1024;
A.height = B.height = C.height = G.height = MAX_N_ELEMENTS / 1024;
A.elements = (float*)malloc(sizeof(float) * MAX_N_ELEMENTS);
B.elements = (float*)malloc(sizeof(float) * MAX_N_ELEMENTS);
C.elements = (float*)malloc(sizeof(float) * MAX_N_ELEMENTS);
G.elements = (float*)malloc(sizeof(float) * MAX_N_ELEMENTS);
generate_random_float_array(A.elements, MAX_N_ELEMENTS);
generate_random_float_array(B.elements, MAX_N_ELEMENTS);
CHECK_TIME_START;
combine_two_arrays(A.elements, B.elements, C.elements, n_elements);
CHECK_TIME_END(compute_time);
printf("***GPU C[10] = %f/ Time taken = %.6fms\n", C.elements[10], compute_time);
cudaError_t cudaStatus = combine_two_arrays_GPU(A, B, G);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "combine_two_arrays_GPU failed!");
return 1;
}
printf("***GPU G[10] = %f/ Time taken = %.6fms\n", G.elements[10], device_time);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
cudaError_t combine_two_arrays_GPU(const Array A, const Array B, Array C) {
// Think about how to place the timer calls below so that the time is measured as accurately as possible.
CHECK_TIME_INIT_GPU()
CHECK_TIME_START_GPU()
CHECK_TIME_END_GPU(device_time)
CHECK_TIME_DEST_GPU()
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
} // the explicit error check above is equivalent to what the CUDA_CALL macro does
Array d_A, d_B, d_C;
size_t size;
d_A.width = A.width; d_A.height = A.height;
size = A.width * A.height * sizeof(float);
CUDA_CALL(cudaMalloc(&d_A.elements, size))
CUDA_CALL(cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice))
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
CUDA_CALL(cudaMalloc(&d_B.elements, size))
CUDA_CALL(cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice))
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
CUDA_CALL(cudaMalloc(&d_C.elements, size))
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(A.width / dimBlock.x, A.height / dimBlock.y);
CombineTwoArrraysKernel << < dimGrid, dimBlock >> > (d_A, d_B, d_C);
CUDA_CALL(cudaGetLastError())
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(cudaDeviceSynchronize())
CUDA_CALL(cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost))
Error:
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
return cudaStatus;
}
#endif
#if prac==2
int n;
#define BLOCK_SIZE 32
#define ELEMENT_SIZE (1<<10)
const int ELEM_PER_VECTOR = 32;
float(*pVecX), (*pVecY), (*pVecY_G);
float(*pMatA);
void init_MatVec(void)
{
srand((unsigned)time(NULL));
FILE* fp = fopen("gen.bin", "rb");
fread(&n, sizeof(int), 1, fp);
pVecX = new float[n * ELEM_PER_VECTOR];
pVecY = new float[n * ELEM_PER_VECTOR];
pVecY_G = new float[n * ELEM_PER_VECTOR];
pMatA = new float[ELEM_PER_VECTOR * ELEM_PER_VECTOR];
fread(pVecX, sizeof(float), n * ELEM_PER_VECTOR, fp);
fread(pMatA, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
fclose(fp);
}
void Mat_Vec_Multiply()
{
int vec_idx, i, j;
for (vec_idx = 0; vec_idx < ELEMENT_SIZE; vec_idx++) {
for (i = 0; i < ELEM_PER_VECTOR; i++) {
float sum = 0;
for (j = 0; j < ELEM_PER_VECTOR; j++) {
sum += pMatA[i * ELEM_PER_VECTOR + j] * pVecX[vec_idx * ELEM_PER_VECTOR + j];
}
pVecY[vec_idx * ELEM_PER_VECTOR + i] = sum;
}
}
}
__global__ void Mat_Vec_Multiply_Kernel(float* d_VecY, float* d_VecX, float* d_MatA, int Vec_Size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x; // global thread index: block blockIdx.x holds blockDim.x threads, and this is thread threadIdx.x within it
int i, j;
for (i = 0; i < ELEM_PER_VECTOR; i++) {
float sum = 0;
for (j = 0; j < ELEM_PER_VECTOR; j++) {
sum += d_MatA[i * ELEM_PER_VECTOR + j] * d_VecX[id * ELEM_PER_VECTOR + j];
}
d_VecY[id * ELEM_PER_VECTOR + i] = sum;
}
}
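// Editorial sketch (matches the launch in Mat_Vec_Multiply_GPU below, shown here for clarity):
// each thread computes one complete ELEM_PER_VECTOR-long output vector, so the host launches
// one thread per input vector:
//
//   dim3 dimBlock(BLOCK_SIZE, 1);      // 32 threads per block
//   dim3 dimGrid(n / dimBlock.x, 1);   // assumes n is a multiple of BLOCK_SIZE
//   Mat_Vec_Multiply_Kernel << < dimGrid, dimBlock >> > (d_VecY, d_VecX, d_MatA, ELEM_PER_VECTOR);
//
// Note that the Vec_Size parameter is currently unused inside the kernel body.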
void Mat_Vec_Multiply_GPU(float* p_VecX, float* p_MatA, float* p_VecY_G)
{
CHECK_TIME_INIT_GPU()
CHECK_TIME_START_GPU()
CHECK_TIME_END_GPU(device_time)
CHECK_TIME_DEST_GPU()
float* d_VecX;
float* d_MatA;
float* d_VecY_G;
size_t size = sizeof(float) * n * ELEM_PER_VECTOR;
CUDA_CALL(cudaMalloc(&d_VecX, size))
CUDA_CALL(cudaMemcpy(d_VecX, p_VecX, size, cudaMemcpyHostToDevice));
size = sizeof(float) * ELEM_PER_VECTOR * ELEM_PER_VECTOR;
CUDA_CALL(cudaMalloc(&d_MatA, size))
CUDA_CALL(cudaMemcpy(d_MatA, p_MatA, size, cudaMemcpyHostToDevice));
size = sizeof(float) * n * ELEM_PER_VECTOR;
CUDA_CALL(cudaMalloc(&d_VecY_G, size))
dim3 dimBlock(BLOCK_SIZE, 1);
dim3 dimGrid(n / dimBlock.x, 1);
Mat_Vec_Multiply_Kernel << < dimGrid, dimBlock >> > (d_VecY_G, d_VecX, d_MatA, ELEM_PER_VECTOR);
CUDA_CALL(cudaGetLastError())
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(cudaDeviceSynchronize())
CUDA_CALL(cudaMemcpy(p_VecY_G, d_VecY_G, size, cudaMemcpyDeviceToHost))
cudaFree(d_MatA);
cudaFree(d_VecX);
cudaFree(d_VecY_G);
}
void init_data(int size) {
srand((unsigned)time(NULL));
FILE* fp = fopen("gen.bin", "wb");
fwrite(&size, sizeof(int), 1, fp);
int i, j;
float x;
for (i = 0; i < size; i++) {
for (j = 0; j < ELEM_PER_VECTOR; j++) {
x = 2.0f * ((float)rand() / RAND_MAX) - 1.0f;
fwrite(&x, sizeof(float), 1, fp);
}
}
for (i = 0; i < ELEM_PER_VECTOR; i++) {
for (j = 0; j < ELEM_PER_VECTOR; j++) {
x = 2.0f * ((float)rand() / RAND_MAX) - 1.0f;
fwrite(&x, sizeof(float), 1, fp);
}
}
fclose(fp);
return;
}
int main()
{
init_data(ELEMENT_SIZE);
init_MatVec();
printf("n = %d file open ok.\n", n);
CHECK_TIME_START;
Mat_Vec_Multiply();
CHECK_TIME_END(compute_time);
printf("***CPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY[0 * ELEM_PER_VECTOR + 0], compute_time);
Mat_Vec_Multiply_GPU(pVecX, pMatA, pVecY_G);
printf("***GPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY_G[0 * ELEM_PER_VECTOR + 0], device_time);
int vec_idx, i;
for (i = 0; i < ELEMENT_SIZE * ELEM_PER_VECTOR; i++) {
if (fabs(pVecY[i] - pVecY_G[i]) > 0.001) {
printf("Kernel execution fail!!\n\n");
break;
}
}
}
#endif |
ea60cebbefba13a72b554ffb80ec1f0ad295f8ba.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Library handle.
*/
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include "cutlass/library/handle.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructor
Handle::Handle(
hipStream_t stream,
size_t workspace_size
):
provider_(Provider::kCUTLASS),
stream_(stream),
workspace_(nullptr),
workspace_size_(0),
scalar_pointer_mode_(ScalarPointerMode::kHost),
last_operation_(nullptr) {
int device_idx = -1;
hipError_t error = hipGetDevice(&device_idx);
if (error != hipSuccess) {
throw std::runtime_error("hipGetDevice() failed");
}
error = hipGetDeviceProperties(&device_, device_idx);
if (error != hipSuccess) {
throw std::runtime_error("hipGetDeviceProperties() failed");
}
set_workspace_size(workspace_size);
Singleton::get();
}
/// Destructor
Handle::~Handle() {
if (workspace_) {
if (workspace_) {
hipFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = 0;
}
}
/// Move constructor
Handle::Handle(Handle && handle) {
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
}
/// Move assignment operator
Handle & Handle::operator=(Handle && handle) {
provider_ = handle.provider_;
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
return *this;
}
int Handle::compute_capability() const {
return device_.major * 10 + device_.minor;
}
/// Sets the current CUDA stream
void Handle::set_stream(hipStream_t stream) {
stream_ = stream;
}
/// Gets the current CUDA stream
hipStream_t Handle::get_stream() const {
return stream_;
}
/// Gets the current provider
Provider Handle::get_provider() const {
return provider_;
}
/// Sets the provider of operations
void Handle::set_provider(Provider provider) {
provider_ = provider;
}
/// Gets the device workspace size
size_t Handle::get_workspace_size() const {
return workspace_size_;
}
/// Gets a pointer to the device workspace allocation in Global Memory
void *Handle::get_workspace() const {
return workspace_;
}
/// Sets the size of device workspace, invalidating previous calls to get_device_workspace()
void Handle::set_workspace_size(size_t bytes) {
if (bytes != workspace_size_) {
if (workspace_) {
hipFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = bytes;
if (workspace_size_) {
hipError_t error = hipMalloc((void **)&workspace_, workspace_size_);
if (error != hipSuccess) {
throw std::runtime_error("Failed to allocate workspace");
}
}
}
if (workspace_) {
hipError_t error = hipMemset(workspace_, 0, workspace_size_);
if (error != hipSuccess) {
throw std::runtime_error("Failed to clear workspace");
}
}
}
/// Gets the scalar pointer mode
ScalarPointerMode Handle::get_scalar_pointer_mode() const {
return scalar_pointer_mode_;
}
/// Sets the scalar pointer mode
void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) {
scalar_pointer_mode_ = mode;
}
/// Gets the last operation
Operation const *Handle::get_last_operation() const {
return last_operation_;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the maximum required alignment for each operator
static int maximum_alignment_requirement(GemmDescription const &desc) {
return ::max(
::max(desc.A.alignment, desc.B.alignment), desc.C.alignment);
}
/// Returns the largest alignment (in units of elements) the problem satisfies, starting from a
/// given upper limit.
static int gemm_problem_alignment(
int M,
int N,
int K,
NumericTypeID element_A,
void const *ptr_A,
int64_t lda,
int64_t batch_stride_A,
NumericTypeID element_B,
void const *ptr_B,
int64_t ldb,
int64_t batch_stride_B,
NumericTypeID element_C,
void const * ptr_C,
int64_t ldc,
int64_t batch_stride_C,
void const * ptr_D,
int64_t ldd,
int64_t batch_stride_D,
int max_alignment_in_bytes = 16
) {
void const *pointers[] = {
ptr_A, ptr_B, ptr_C, ptr_D
};
int64_t extents[] = {
M, N, K, lda, ldb, ldc, ldd, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D
};
NumericTypeID elements[] = {
element_A, element_B, element_C
};
for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) {
bool satisfied = true;
// Can pointers satisfy this?
for (void const *ptr : pointers) {
std::uintptr_t int_ptr = reinterpret_cast<std::uintptr_t>(ptr);
if (int_ptr % max_alignment_in_bytes) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Compute the maximum alignment based on element data types
int max_element_alignment = 0;
for (NumericTypeID type_id : elements) {
int element_alignment = max_alignment_in_bytes * 8 / library::sizeof_bits(type_id);
max_element_alignment = ::max(max_element_alignment, element_alignment);
}
// Can the problem size and leading dimensions satisfy this?
for (int64_t extent : extents) {
if (extent % max_element_alignment) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Yes
return max_element_alignment;
}
// No alignment satisfies this problem
return 0;
}
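// Worked example (editorial note, not part of the original source): with max_alignment_in_bytes
// = 16 and every operand in F16 (16-bit elements), the candidate element alignment is
// 16 * 8 / 16 = 8 elements. The function returns 8 only if all pointers are 16-byte aligned and
// M, N, K plus every leading dimension and batch stride are divisible by 8; otherwise it retries
// with 8 bytes, then 4, and so on, returning 0 if no candidate is satisfied.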
/// Find the best kernel in descending order of preference.
static Operation const * find_gemm_operation(
GemmOperationFunctionalMap::const_iterator operators_it,
GemmPreferenceKey const preference_key) {
auto cc_it = operators_it->second.upper_bound(preference_key);
if (cc_it == operators_it->second.begin()) {
return nullptr;
}
Operation const *operation = nullptr;
// Search in descending order of compute capability
do {
--cc_it;
// Search tile sizes in order, for now.
for (auto const * op : cc_it->second) {
GemmDescription const &desc = static_cast<GemmDescription const &>(op->description());
int min_cc = desc.tile_description.minimum_compute_capability;
int max_cc = desc.tile_description.maximum_compute_capability;
int op_alignment = maximum_alignment_requirement(desc);
if ((min_cc <= preference_key.compute_capability) &&
(preference_key.compute_capability <= max_cc) &&
(op_alignment <= preference_key.alignment)) {
operation = op;
break;
}
}
} while (!operation && cc_it != operators_it->second.begin());
return operation;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C
Status Handle::gemm(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int64_t lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int64_t ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int64_t ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int64_t ldd /// Leading dimension of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kGemm,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A, lda, 0,
element_B, ptr_B, ldb, 0,
element_C, ptr_C, ldc, 0,
ptr_D, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmConfiguration configuration{
{M, N, K},
lda,
ldb,
ldc,
ldd,
1
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
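// Usage sketch (editorial, illustrative only): given an already constructed Handle `handle` and
// device pointers A, B, C, D with leading dimensions lda, ldb, ldc, ldd (all assumptions for this
// sketch, not defined in this file), a single-precision column-major GEMM could be issued as
//
//   float alpha = 1.0f, beta = 0.0f;
//   Status status = handle.gemm(
//       M, N, K,
//       NumericTypeID::kF32, NumericTypeID::kF32, &alpha,
//       NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, A, lda,
//       NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, B, ldb,
//       &beta, NumericTypeID::kF32, C, ldc, D, ldd);
//
// The handle then selects the best matching CUTLASS kernel for the current device, data types,
// and achievable alignment, as implemented above.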
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C.
//
// Supports batched-strided, batched array or split-K serial or split-K parallel.
//
Status Handle::gemm_universal(
GemmUniversalMode mode, /// indicates the mode in which the kUniversal GEMM is launched
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int64_t lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int64_t ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int64_t ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int64_t ldd, /// Leading dimension of D matrix
int batch_count, /// Batch count or number of split-K slices
int64_t batch_stride_A, /// Batch stride of A operand
int64_t batch_stride_B, /// Batch stride of B operand
int64_t batch_stride_C, /// Batch stride of C operand
int64_t batch_stride_D /// Batch stride of D operand
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kUniversal,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
void const *ptr_A_check = ptr_A;
void const *ptr_B_check = ptr_B;
void const *ptr_C_check = ptr_C;
void * ptr_D_check = ptr_D;
// Ignore alignment of pointers to pointers. We can't check this from the host,
// as each batch index has its own pointer in device memory.
if (mode == GemmUniversalMode::kArray) {
ptr_A_check = nullptr;
ptr_B_check = nullptr;
ptr_C_check = nullptr;
ptr_D_check = nullptr;
}
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A_check, lda, 0,
element_B, ptr_B_check, ldb, 0,
element_C, ptr_C_check, ldc, 0,
ptr_D_check, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmUniversalConfiguration configuration{
mode,
{M, N, K},
batch_count,
lda,
ldb,
ldc,
ldd
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
GemmUniversalArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
};
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration, &arguments);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex GEMM
Status Handle::gemm_planar_complex(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * ptr_A_real, /// Pointer to real part of A matrix
void const * ptr_A_imag, /// Pointer to imaginary part of A matrix
int64_t lda_real, /// Leading dimension of real part of A matrix
int64_t lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * ptr_B_real, /// Pointer to real part of B matrix
void const * ptr_B_imag, /// Pointer to imaginary part of B matrix
int64_t ldb_real, /// Leading dimension of real part of B matrix
int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * ptr_C_real, /// Pointer to real part of C matrix
void const * ptr_C_imag, /// Pointer to imaginary part of C matrix
int64_t ldc_real, /// Leading dimension of real part of C matrix
int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix
void * ptr_D_real, /// Pointer to real part of D matrix
void * ptr_D_imag, /// Pointer to imaginary part of D matrix
int64_t ldd_real, /// Leading dimension of real part of D matrix
int64_t ldd_imag, /// Leading dimension of imaginary part of D matrix
int batch_count, /// Number of batched GEMMs to execute
int64_t batch_stride_A_real,
int64_t batch_stride_A_imag,
int64_t batch_stride_B_real,
int64_t batch_stride_B_imag,
int64_t batch_stride_C_real,
int64_t batch_stride_C_imag,
int64_t batch_stride_D_real,
int64_t batch_stride_D_imag
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplex,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = ::max(
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_real, lda_real, batch_stride_A_real,
element_B, ptr_B_real, ldb_real, batch_stride_B_real,
element_C, ptr_C_real, ldc_real, batch_stride_C_real,
ptr_D_real, ldd_real, batch_stride_D_real, kMaximumAlignmentSize
),
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_imag, lda_imag, batch_stride_A_imag,
element_B, ptr_B_imag, ldb_imag, batch_stride_B_imag,
element_C, ptr_C_imag, ldc_imag, batch_stride_C_imag,
ptr_D_imag, ldd_imag, batch_stride_D_imag, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexConfiguration configuration{
GemmUniversalMode::kBatched,
{M, N, K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArguments arguments{
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A_real,
batch_stride_A_imag,
batch_stride_B_real,
batch_stride_B_imag,
batch_stride_C_real,
batch_stride_C_imag,
batch_stride_D_real,
batch_stride_D_imag
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex batched GEMM loading pointers from arrays in global memory
Status Handle::gemm_planar_complex_array(
int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid)
int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid)
int expected_K, /// Expected GEMM K dimension
int batch_count, /// Number of independent GEMM computations to execute
int const *M, /// Array containing the GEMM M dimension for each batch index
int const *N, /// Array containing the GEMM N dimension for each batch index
int const *K, /// Array containing the GEMM K dimension for each batch index
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices
void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices
int64_t lda_real, /// Leading dimension of real part of A matrix
int64_t lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices
void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices
int64_t ldb_real, /// Leading dimension of real part of B matrix
int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices
void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices
int64_t ldc_real, /// Leading dimension of real part of C matrix
int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix
void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices
void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices
int64_t ldd_real, /// Leading dimension of real part of D matrix
int64_t ldd_imag /// Leading dimension of imaginary part of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplexArray,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = ::max(
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_real, 0,
element_B, nullptr, ldb_real, 0,
element_C, nullptr, ldc_real, 0,
nullptr, ldd_real, 0, kMaximumAlignmentSize
),
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_imag, 0,
element_B, nullptr, ldb_imag, 0,
element_C, nullptr, ldc_imag, 0,
nullptr, ldd_imag, 0, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexArrayConfiguration configuration{
{expected_M, expected_N, expected_K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArrayArguments arguments{
M, N, K,
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Finds conv operation instances with Conv::ElementC = Reduction::ElementWorkspace
Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation) {
ConvDescription const &conv_desc =
static_cast<ConvDescription const &>(operation->description());
// if the current conv operation's accumulator and output data types match, return the operation
if(conv_desc.tile_description.math_instruction.element_accumulator == conv_desc.C.element) {
return operation;
}
// find conv operation to match conv output and reduction workspace data type
ConvFunctionalKey key(
library::Provider::kCUTLASS,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
// conv operation table for conv2d or conv3d
auto conv_operations = (conv_desc.kind == OperationKind::kConv2d) ?
Singleton::get().operation_table.conv2d_operations :
Singleton::get().operation_table.conv3d_operations;
// find ConvFunctionalKey in convolution operation table
auto operators_it = conv_operations.find(key);
if (operators_it == conv_operations.end()) {
return nullptr;
}
if (operators_it->second.empty()) {
return nullptr;
}
// conv operation for same compute capability and iterator algorithm
ConvPreferenceKey preference_key(
conv_desc.tile_description.minimum_compute_capability,
conv_desc.iterator_algorithm);
auto it = operators_it->second.find(preference_key);
if(it == operators_it->second.end()) {
return nullptr;
}
// return the matching conv operation (same tile sizes and instruction)
for (auto op : it->second) {
if (op->description().tile_description == operation->description().tile_description) {
return op;
}
}
return nullptr;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Finds gemm operation instances with Gemm::ElementC = Reduction::ElementWorkspace
Operation const* find_gemm_operation_for_parallel_reduction(Operation const *operation) {
GemmDescription const &gemm_desc =
static_cast<GemmDescription const &>(operation->description());
// if the current gemm operation's accumulator and output data types match, return the operation
if(gemm_desc.tile_description.math_instruction.element_accumulator == gemm_desc.C.element) {
return operation;
}
// find gemm operation to match gemm output and reduction workspace data type
GemmFunctionalKey key(
library::Provider::kCUTLASS,
gemm_desc.gemm_kind,
gemm_desc.tile_description.math_instruction.element_accumulator,
gemm_desc.element_epilogue,
gemm_desc.A.element,
gemm_desc.A.layout,
gemm_desc.transform_A,
gemm_desc.B.element,
gemm_desc.B.layout,
gemm_desc.transform_B,
gemm_desc.tile_description.math_instruction.element_accumulator);
// gemm operation table
auto gemm_operations = Singleton::get().operation_table.gemm_operations;
// find GemmFunctionalKey in gemm operation table
auto operators_it = gemm_operations.find(key);
if (operators_it == gemm_operations.end()) {
return nullptr;
}
if (operators_it->second.empty()) {
return nullptr;
}
// A and B use the same alignment in generator.py
int alignment = gemm_desc.A.alignment;
// gemm operation for same compute capability and iterator algorithm
GemmPreferenceKey preference_key(
gemm_desc.tile_description.minimum_compute_capability,
alignment);
return find_gemm_operation(operators_it, preference_key);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| ea60cebbefba13a72b554ffb80ec1f0ad295f8ba.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief CUTLASS Library handle.
*/
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include "cutlass/library/handle.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructor
Handle::Handle(
cudaStream_t stream,
size_t workspace_size
):
provider_(Provider::kCUTLASS),
stream_(stream),
workspace_(nullptr),
workspace_size_(0),
scalar_pointer_mode_(ScalarPointerMode::kHost),
last_operation_(nullptr) {
int device_idx = -1;
cudaError_t error = cudaGetDevice(&device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() failed");
}
error = cudaGetDeviceProperties(&device_, device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
set_workspace_size(workspace_size);
Singleton::get();
}
/// Destructor
Handle::~Handle() {
if (workspace_) {
if (workspace_) {
cudaFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = 0;
}
}
/// Move constructor
Handle::Handle(Handle && handle) {
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
}
/// Move assignment operator
Handle & Handle::operator=(Handle && handle) {
provider_ = handle.provider_;
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
return *this;
}
int Handle::compute_capability() const {
return device_.major * 10 + device_.minor;
}
/// Sets the current CUDA stream
void Handle::set_stream(cudaStream_t stream) {
stream_ = stream;
}
/// Gets the current CUDA stream
cudaStream_t Handle::get_stream() const {
return stream_;
}
/// Gets the current provider
Provider Handle::get_provider() const {
return provider_;
}
/// Sets the provider of operations
void Handle::set_provider(Provider provider) {
provider_ = provider;
}
/// Gets the device workspace size
size_t Handle::get_workspace_size() const {
return workspace_size_;
}
/// Gets a pointer to the device workspace allocation in Global Memory
void *Handle::get_workspace() const {
return workspace_;
}
/// Sets the size of device workspace, invalidating previous calls to get_device_workspace()
void Handle::set_workspace_size(size_t bytes) {
if (bytes != workspace_size_) {
if (workspace_) {
cudaFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = bytes;
if (workspace_size_) {
cudaError_t error = cudaMalloc((void **)&workspace_, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to allocate workspace");
}
}
}
if (workspace_) {
cudaError_t error = cudaMemset(workspace_, 0, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to clear workspace");
}
}
}
/// Gets the scalar pointer mode
ScalarPointerMode Handle::get_scalar_pointer_mode() const {
return scalar_pointer_mode_;
}
/// Sets the scalar pointer mode
void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) {
scalar_pointer_mode_ = mode;
}
/// Gets the last operation
Operation const *Handle::get_last_operation() const {
return last_operation_;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the maximum required alignment for each operator
static int maximum_alignment_requirement(GemmDescription const &desc) {
return std::max(
std::max(desc.A.alignment, desc.B.alignment), desc.C.alignment);
}
/// Returns the largest alignment (in units of elements) the problem satisfies, starting from a
/// given upper limit.
static int gemm_problem_alignment(
int M,
int N,
int K,
NumericTypeID element_A,
void const *ptr_A,
int64_t lda,
int64_t batch_stride_A,
NumericTypeID element_B,
void const *ptr_B,
int64_t ldb,
int64_t batch_stride_B,
NumericTypeID element_C,
void const * ptr_C,
int64_t ldc,
int64_t batch_stride_C,
void const * ptr_D,
int64_t ldd,
int64_t batch_stride_D,
int max_alignment_in_bytes = 16
) {
void const *pointers[] = {
ptr_A, ptr_B, ptr_C, ptr_D
};
int64_t extents[] = {
M, N, K, lda, ldb, ldc, ldd, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D
};
NumericTypeID elements[] = {
element_A, element_B, element_C
};
for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) {
bool satisfied = true;
// Can pointers satisfy this?
for (void const *ptr : pointers) {
std::uintptr_t int_ptr = reinterpret_cast<std::uintptr_t>(ptr);
if (int_ptr % max_alignment_in_bytes) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Compute the maximum alignment based on element data types
int max_element_alignment = 0;
for (NumericTypeID type_id : elements) {
int element_alignment = max_alignment_in_bytes * 8 / library::sizeof_bits(type_id);
max_element_alignment = std::max(max_element_alignment, element_alignment);
}
// Can the problem size and leading dimensions satisfy this?
for (int64_t extent : extents) {
if (extent % max_element_alignment) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Yes
return max_element_alignment;
}
// No alignment satisfies this problem
return 0;
}
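// Worked example (editorial note, not part of the original source): with max_alignment_in_bytes
// = 16 and every operand in F16 (16-bit elements), the candidate element alignment is
// 16 * 8 / 16 = 8 elements. The function returns 8 only if all pointers are 16-byte aligned and
// M, N, K plus every leading dimension and batch stride are divisible by 8; otherwise it retries
// with 8 bytes, then 4, and so on, returning 0 if no candidate is satisfied.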
/// Find the best kernel in descending order of preference.
static Operation const * find_gemm_operation(
GemmOperationFunctionalMap::const_iterator operators_it,
GemmPreferenceKey const preference_key) {
auto cc_it = operators_it->second.upper_bound(preference_key);
if (cc_it == operators_it->second.begin()) {
return nullptr;
}
Operation const *operation = nullptr;
// Search in descending order of compute capability
do {
--cc_it;
// Search tile sizes in order, for now.
for (auto const * op : cc_it->second) {
GemmDescription const &desc = static_cast<GemmDescription const &>(op->description());
int min_cc = desc.tile_description.minimum_compute_capability;
int max_cc = desc.tile_description.maximum_compute_capability;
int op_alignment = maximum_alignment_requirement(desc);
if ((min_cc <= preference_key.compute_capability) &&
(preference_key.compute_capability <= max_cc) &&
(op_alignment <= preference_key.alignment)) {
operation = op;
break;
}
}
} while (!operation && cc_it != operators_it->second.begin());
return operation;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C
Status Handle::gemm(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int64_t lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int64_t ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int64_t ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int64_t ldd /// Leading dimension of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kGemm,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A, lda, 0,
element_B, ptr_B, ldb, 0,
element_C, ptr_C, ldc, 0,
ptr_D, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmConfiguration configuration{
{M, N, K},
lda,
ldb,
ldc,
ldd,
1
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
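// Illustrative usage sketch (not part of the original source): one plausible way a caller could
// drive Handle::gemm for a single-precision, column-major GEMM. The enum spellings
// (NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone) and the Handle
// construction are assumptions based on the CUTLASS library headers, not on this file.
//
//   cutlass::library::Handle handle;
//   float alpha = 1.0f, beta = 0.0f;
//   cutlass::Status status = handle.gemm(
//     M, N, K,
//     NumericTypeID::kF32, NumericTypeID::kF32, &alpha,
//     NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, d_A, lda,
//     NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, d_B, ldb,
//     &beta,
//     NumericTypeID::kF32, d_C, ldc,
//     d_D, ldd);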
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C.
//
// Supports batched-strided, batched array or split-K serial or split-K parallel.
//
Status Handle::gemm_universal(
GemmUniversalMode mode, /// indicates the mode in which the kUniversal GEMM is launched
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int64_t lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int64_t ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int64_t ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int64_t ldd, /// Leading dimension of D matrix
int batch_count, /// Batch count or number of split-K slices
int64_t batch_stride_A, /// Batch stride of A operand
int64_t batch_stride_B, /// Batch stride of B operand
int64_t batch_stride_C, /// Batch stride of C operand
int64_t batch_stride_D /// Batch stride of D operand
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kUniversal,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
void const *ptr_A_check = ptr_A;
void const *ptr_B_check = ptr_B;
void const *ptr_C_check = ptr_C;
void * ptr_D_check = ptr_D;
// Ignore alignment of pointers to pointers. We can't check this from the host,
// as each batch index has its own pointer in device memory.
if (mode == GemmUniversalMode::kArray) {
ptr_A_check = nullptr;
ptr_B_check = nullptr;
ptr_C_check = nullptr;
ptr_D_check = nullptr;
}
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A_check, lda, 0,
element_B, ptr_B_check, ldb, 0,
element_C, ptr_C_check, ldc, 0,
ptr_D_check, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmUniversalConfiguration configuration{
mode,
{M, N, K},
batch_count,
lda,
ldb,
ldc,
ldd
};
// Query host workspace size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
GemmUniversalArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
};
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration, &arguments);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
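// Illustrative note (not part of the original source): for a plain batched-strided GEMM, the call
// above would presumably be driven with mode = GemmUniversalMode::kBatched and per-operand element
// strides sized to one whole matrix, e.g. for column-major operands:
//
//   int64_t batch_stride_A = lda * K;   // M-by-K A, leading dimension lda
//   int64_t batch_stride_B = ldb * N;   // K-by-N B, leading dimension ldb
//   int64_t batch_stride_C = ldc * N;   // M-by-N C
//   int64_t batch_stride_D = ldd * N;   // M-by-N D
//
// For split-K modes, batch_count instead carries the number of split-K slices, as noted in the
// parameter list above.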
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex GEMM
Status Handle::gemm_planar_complex(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * ptr_A_real, /// Pointer to real part of A matrix
void const * ptr_A_imag, /// Pointer to imaginary part of A matrix
int64_t lda_real, /// Leading dimension of real part of A matrix
int64_t lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * ptr_B_real, /// Pointer to real part of B matrix
void const * ptr_B_imag, /// Pointer to imaginary part of B matrix
int64_t ldb_real, /// Leading dimension of real part of B matrix
int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * ptr_C_real, /// Pointer to real part of C matrix
void const * ptr_C_imag, /// Pointer to imaginary part of C matrix
int64_t ldc_real, /// Leading dimension of real part of C matrix
int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix
void * ptr_D_real, /// Pointer to real part of D matrix
void * ptr_D_imag, /// Pointer to imaginary part of D matrix
int64_t ldd_real, /// Leading dimension of real part of D matrix
int64_t ldd_imag, /// Leading dimension of imaginary part of D matrix
int batch_count, /// Number of batched GEMMs to execute
int64_t batch_stride_A_real,
int64_t batch_stride_A_imag,
int64_t batch_stride_B_real,
int64_t batch_stride_B_imag,
int64_t batch_stride_C_real,
int64_t batch_stride_C_imag,
int64_t batch_stride_D_real,
int64_t batch_stride_D_imag
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplex,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = std::max(
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_real, lda_real, batch_stride_A_real,
element_B, ptr_B_real, ldb_real, batch_stride_B_real,
element_C, ptr_C_real, ldc_real, batch_stride_C_real,
ptr_D_real, ldd_real, batch_stride_D_real, kMaximumAlignmentSize
),
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_imag, lda_imag, batch_stride_A_imag,
element_B, ptr_B_imag, ldb_imag, batch_stride_B_imag,
element_C, ptr_C_imag, ldc_imag, batch_stride_C_imag,
ptr_D_imag, ldd_imag, batch_stride_D_imag, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexConfiguration configuration{
GemmUniversalMode::kBatched,
{M, N, K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host workspace size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArguments arguments{
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A_real,
batch_stride_A_imag,
batch_stride_B_real,
batch_stride_B_imag,
batch_stride_C_real,
batch_stride_C_imag,
batch_stride_D_real,
batch_stride_D_imag
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex batched GEMM loading pointers from arrays in global memory
Status Handle::gemm_planar_complex_array(
int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid)
int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid)
int expected_K, /// Expected GEMM K dimension
int batch_count, /// Number of independent GEMM computations to execute
int const *M, /// Array containing the GEMM M dimension for each batch index
int const *N, /// Array containing the GEMM N dimension for each batch index
int const *K, /// Array containing the GEMM K dimension for each batch index
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices
void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices
int64_t lda_real, /// Leading dimension of real part of A matrix
int64_t lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices
void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices
int64_t ldb_real, /// Leading dimension of real part of B matrix
int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices
void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices
int64_t ldc_real, /// Leading dimension of real part of C matrix
int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix
void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices
void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices
int64_t ldd_real, /// Leading dimension of real part of D matrix
int64_t ldd_imag /// Leading dimension of imaginary part of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplexArray,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = std::max(
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_real, 0,
element_B, nullptr, ldb_real, 0,
element_C, nullptr, ldc_real, 0,
nullptr, ldd_real, 0, kMaximumAlignmentSize
),
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_imag, 0,
element_B, nullptr, ldb_imag, 0,
element_C, nullptr, ldc_imag, 0,
nullptr, ldd_imag, 0, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexArrayConfiguration configuration{
{expected_M, expected_N, expected_K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host workspace size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArrayArguments arguments{
M, N, K,
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Finds conv operation instances with Conv::ElementC = Reduction::ElementWorkspace
Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation) {
ConvDescription const &conv_desc =
static_cast<ConvDescription const &>(operation->description());
// if the current conv operation's accumulator and output data types match, return the operation
if(conv_desc.tile_description.math_instruction.element_accumulator == conv_desc.C.element) {
return operation;
}
// find conv operation to match conv output and reduction workspace data type
ConvFunctionalKey key(
library::Provider::kCUTLASS,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
// conv operation table for conv2d or conv3d
auto conv_operations = (conv_desc.kind == OperationKind::kConv2d) ?
Singleton::get().operation_table.conv2d_operations :
Singleton::get().operation_table.conv3d_operations;
// find ConvFunctionalKey in convolution operation table
auto operators_it = conv_operations.find(key);
if (operators_it == conv_operations.end()) {
return nullptr;
}
if (operators_it->second.empty()) {
return nullptr;
}
// conv operation for same compute capability and iterator algorithm
ConvPreferenceKey preference_key(
conv_desc.tile_description.minimum_compute_capability,
conv_desc.iterator_algorithm);
auto it = operators_it->second.find(preference_key);
if(it == operators_it->second.end()) {
return nullptr;
}
// return matching conv operation (same tile sizes and instruction)
for (auto op : it->second) {
if (op->description().tile_description == operation->description().tile_description) {
return op;
}
}
return nullptr;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Finds gemm operation instances with Gemm::ElementC = Reduction::ElementWorkspace
Operation const* find_gemm_operation_for_parallel_reduction(Operation const *operation) {
GemmDescription const &gemm_desc =
static_cast<GemmDescription const &>(operation->description());
// if the current gemm operation's accumulator and output data types match, return the operation
if(gemm_desc.tile_description.math_instruction.element_accumulator == gemm_desc.C.element) {
return operation;
}
// find gemm operation to match gemm output and reduction workspace data type
GemmFunctionalKey key(
library::Provider::kCUTLASS,
gemm_desc.gemm_kind,
gemm_desc.tile_description.math_instruction.element_accumulator,
gemm_desc.element_epilogue,
gemm_desc.A.element,
gemm_desc.A.layout,
gemm_desc.transform_A,
gemm_desc.B.element,
gemm_desc.B.layout,
gemm_desc.transform_B,
gemm_desc.tile_description.math_instruction.element_accumulator);
// gemm operation table
auto gemm_operations = Singleton::get().operation_table.gemm_operations;
// find GemmFunctionalKey in gemm operation table
auto operators_it = gemm_operations.find(key);
if (operators_it == gemm_operations.end()) {
return nullptr;
}
if (operators_it->second.empty()) {
return nullptr;
}
// A and B use the same alignment in generator.py
int alignment = gemm_desc.A.alignment;
// gemm operation for the same compute capability and alignment
GemmPreferenceKey preference_key(
gemm_desc.tile_description.minimum_compute_capability,
alignment);
return find_gemm_operation(operators_it, preference_key);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
2fd88b96718b1dcb46b8558f6a80023363513f17.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
using namespace cooperative_groups;
__global__ void race (int* A)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
grid_group g = this_grid();
int idx = blockDim.x * bid + tid;
int temp = A[idx + 1];
synchronize(g);
A[idx] = temp;
} | 2fd88b96718b1dcb46b8558f6a80023363513f17.cu | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
#include <cooperative_groups.h>
using namespace cooperative_groups;
__global__ void race (int* A)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
grid_group g = this_grid();
int idx = blockDim.x * bid + tid;
int temp = A[idx + 1];
synchronize(g);
A[idx] = temp;
} |
506e2e0eaf0163b9800350df5fb0c304fc6319eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <float.h>
__global__ void maxpooling_kernel(float *output, float *input,
int batch, int channel, int height, int width,
int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width, int total_size)
{
int N = batch;
int C = channel;
int H = height;
int W = width;
int kH = kernel_height;
int kW = kernel_width;
int pH = pad_height;
int pW = pad_width;
int sH = stride_height;
int sW = stride_width;
int P = ((H + 2 * pH - kH) / sH) + 1;
int Q = ((W + 2 * pW - kW) / sW) + 1;
//tid : thread id
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= total_size)
return;
//q_idx : output w-index
int q_idx = tid % Q;
int idx = tid / Q;
//p_idx : output h-index
int p_idx = idx % P;
idx /= P;
//k_idx : output channel-index
int k_idx = idx % C;
//n_idx : output batch-index
int n_idx = idx / C;
//output(n_idx, k_idx, p_idx, q_idx)
float max = -FLT_MAX;
for (int kh = 0; kh < kH; kh++) {
int h_idx = p_idx * sH + kh - pH;
if (h_idx >= 0 && h_idx < H) {
for (int kw = 0; kw < kW; kw++) {
int w_idx = q_idx * sW + kw - pW;
if (w_idx >= 0 && w_idx < W) {
int input_index = n_idx * C * H * W + k_idx * H * W + h_idx * W + w_idx;
if (input[input_index] > max) {
max = input[input_index];
}
}
}
}
}
output[tid] = max;
}
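// Illustrative note (not part of the original source): the decomposition above inverts
// tid = ((n_idx*C + k_idx)*P + p_idx)*Q + q_idx. For a hypothetical N=1, C=2, P=Q=3 and tid=11:
//   q_idx = 11 % 3 = 2, p_idx = (11/3) % 3 = 0, k_idx = (11/9) % 2 = 1, n_idx = 11/18 = 0.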
void maxpooling(float *output, float *input,
int batch, int channel, int height, int width,
int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width)
{
int N = batch;
int C = channel;
int H = height;
int W = width;
int kH = kernel_height;
int kW = kernel_width;
int pH = pad_height;
int pW = pad_width;
int sH = stride_height;
int sW = stride_width;
int P = (H + 2 * pH - kH) / sH + 1;
int Q = (W + 2 * pW - kW) / sW + 1;
int THREADS_PER_BLOCK = 256;
int TOTAL_SIZE = N * C * P * Q;
int NUMBER_OF_BLOCKS = (TOTAL_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
maxpooling_kernel << < NUMBER_OF_BLOCKS, THREADS_PER_BLOCK >> > (output, input, N, C, H, W, kH, kW, pH, pW, sH, sW, TOTAL_SIZE);
} | 506e2e0eaf0163b9800350df5fb0c304fc6319eb.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <float.h>
__global__ void maxpooling_kernel(float *output, float *input,
int batch, int channel, int height, int width,
int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width, int total_size)
{
int N = batch;
int C = channel;
int H = height;
int W = width;
int kH = kernel_height;
int kW = kernel_width;
int pH = pad_height;
int pW = pad_width;
int sH = stride_height;
int sW = stride_width;
int P = ((H + 2 * pH - kH) / sH) + 1;
int Q = ((W + 2 * pW - kW) / sW) + 1;
//tid : thread id
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= total_size)
return;
//q_idx : output w-index
int q_idx = tid % Q;
int idx = tid / Q;
//p_idx : output h-index
int p_idx = idx % P;
idx /= P;
//k_idx : output channel-index
int k_idx = idx % C;
//n_idx : output batch-index
int n_idx = idx / C;
//output(n_idx, k_idx, p_idx, q_idx)
float max = -FLT_MAX;
for (int kh = 0; kh < kH; kh++) {
int h_idx = p_idx * sH + kh - pH;
if (h_idx >= 0 && h_idx < H) {
for (int kw = 0; kw < kW; kw++) {
int w_idx = q_idx * sW + kw - pW;
if (w_idx >= 0 && w_idx < W) {
int input_index = n_idx * C * H * W + k_idx * H * W + h_idx * W + w_idx;
if (input[input_index] > max) {
max = input[input_index];
}
}
}
}
}
output[tid] = max;
}
void maxpooling(float *output, float *input,
int batch, int channel, int height, int width,
int kernel_height, int kernel_width, int pad_height, int pad_width, int stride_height, int stride_width)
{
int N = batch;
int C = channel;
int H = height;
int W = width;
int kH = kernel_height;
int kW = kernel_width;
int pH = pad_height;
int pW = pad_width;
int sH = stride_height;
int sW = stride_width;
int P = (H + 2 * pH - kH) / sH + 1;
int Q = (W + 2 * pW - kW) / sW + 1;
int THREADS_PER_BLOCK = 256;
int TOTAL_SIZE = N * C * P * Q;
int NUMBER_OF_BLOCKS = (TOTAL_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
maxpooling_kernel << < NUMBER_OF_BLOCKS, THREADS_PER_BLOCK >> > (output, input, N, C, H, W, kH, kW, pH, pW, sH, sW, TOTAL_SIZE);
} |
859f6bdc4f2938b7569d666685354d7214863ba2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <torch/serialize/tensor.h>
#include <THH/THH.h>
#include "./../gpu.h"
#include "./defines.h"
#include "./pairwise_ops.h"
namespace prob_phoc {
namespace gpu {
template <typename T, typename O>
__global__
void cphoc_kernel(const long int na, const long int nb, const long int d, const T* xa, const T* xb, T* y, const O op) {
for (long int i = thGy; i < na; i += NTGy) {
for (long int j = thGx; j < nb; j += NTGx) {
const T* xa_i = xa + i * d;
const T* xb_j = xb + j * d;
y[i * nb + j] = op(d, xa_i, xb_j);
}
}
}
template <typename T, typename O>
__global__
void pphoc_kernel(const long int n, const long int d, const T* x, T* y, const O op) {
for (long int i = thGy; i < n; i += NTGy) {
for (long int j = thGx; j < n; j += NTGx) {
if (j > i) {
const T* x_i = x + i * d;
const T* x_j = x + j * d;
const long k = i * (2 * n - i - 1) / 2 + (j - i - 1);
y[k] = op(d, x_i, x_j);
}
}
}
}
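// Illustrative note (not part of the original source): k above is the condensed upper-triangular
// index over the n*(n-1)/2 unordered pairs. For a hypothetical n = 4:
//   (i,j) = (0,1)->k=0, (0,2)->1, (0,3)->2, (1,2)->3, (1,3)->4, (2,3)->5,
//   e.g. i=1, j=3: k = 1*(2*4 - 1 - 1)/2 + (3 - 1 - 1) = 3 + 1 = 4.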
template <typename T, typename O>
void Impl<T, O>::cphoc(const c10::Device& device, const long int na, const long int nb, const long int d, const T* xa, const T* xb, T* y) const {
c10::DeviceGuard device_guard(device);
auto stream = THCState_getCurrentStream(at::globalContext().getTHCState());
const dim3 block_size(32, 32);
const dim3 grid_size(NUM_BLOCKS(na, 32),
NUM_BLOCKS(nb, 32));
hipLaunchKernelGGL(( cphoc_kernel<T, O>), dim3(grid_size), dim3(block_size), 0, stream, na, nb, d, xa, xb, y, op_);
if (stream == nullptr) {
CHECK_LAST_CUDA_CALL();
}
}
template <typename T, typename O>
void Impl<T, O>::pphoc(const c10::Device& device, const long int n, const long int d, const T* x, T* y) const {
c10::DeviceGuard device_guard(device);
auto stream = THCState_getCurrentStream(at::globalContext().getTHCState());
const dim3 block_size(32, 32);
const dim3 grid_size(NUM_BLOCKS(n, 32),
NUM_BLOCKS(n, 32));
hipLaunchKernelGGL(( pphoc_kernel<T, O>), dim3(grid_size), dim3(block_size), 0, stream, n, d, x, y, op_);
if (stream == nullptr) {
CHECK_LAST_CUDA_CALL();
}
}
template class SumProdLogSemiring<float>;
template class SumProdLogSemiring<double>;
template class SumProdRealSemiring<float>;
template class SumProdRealSemiring<double>;
} // namespace gpu
} // namespace prob_phoc
| 859f6bdc4f2938b7569d666685354d7214863ba2.cu | #include <torch/serialize/tensor.h>
#include <THC/THC.h>
#include "./../gpu.h"
#include "./defines.h"
#include "./pairwise_ops.h"
namespace prob_phoc {
namespace gpu {
template <typename T, typename O>
__global__
void cphoc_kernel(const long int na, const long int nb, const long int d, const T* xa, const T* xb, T* y, const O op) {
for (long int i = thGy; i < na; i += NTGy) {
for (long int j = thGx; j < nb; j += NTGx) {
const T* xa_i = xa + i * d;
const T* xb_j = xb + j * d;
y[i * nb + j] = op(d, xa_i, xb_j);
}
}
}
template <typename T, typename O>
__global__
void pphoc_kernel(const long int n, const long int d, const T* x, T* y, const O op) {
for (long int i = thGy; i < n; i += NTGy) {
for (long int j = thGx; j < n; j += NTGx) {
if (j > i) {
const T* x_i = x + i * d;
const T* x_j = x + j * d;
const long k = i * (2 * n - i - 1) / 2 + (j - i - 1);
y[k] = op(d, x_i, x_j);
}
}
}
}
template <typename T, typename O>
void Impl<T, O>::cphoc(const c10::Device& device, const long int na, const long int nb, const long int d, const T* xa, const T* xb, T* y) const {
c10::DeviceGuard device_guard(device);
auto stream = THCState_getCurrentStream(at::globalContext().getTHCState());
const dim3 block_size(32, 32);
const dim3 grid_size(NUM_BLOCKS(na, 32),
NUM_BLOCKS(nb, 32));
cphoc_kernel<T, O><<<grid_size, block_size, 0, stream>>>(na, nb, d, xa, xb, y, op_);
if (stream == nullptr) {
CHECK_LAST_CUDA_CALL();
}
}
template <typename T, typename O>
void Impl<T, O>::pphoc(const c10::Device& device, const long int n, const long int d, const T* x, T* y) const {
c10::DeviceGuard device_guard(device);
auto stream = THCState_getCurrentStream(at::globalContext().getTHCState());
const dim3 block_size(32, 32);
const dim3 grid_size(NUM_BLOCKS(n, 32),
NUM_BLOCKS(n, 32));
pphoc_kernel<T, O><<<grid_size, block_size, 0, stream>>>(n, d, x, y, op_);
if (stream == nullptr) {
CHECK_LAST_CUDA_CALL();
}
}
template class SumProdLogSemiring<float>;
template class SumProdLogSemiring<double>;
template class SumProdRealSemiring<float>;
template class SumProdRealSemiring<double>;
} // namespace gpu
} // namespace prob_phoc
|
de5116724198c75103414fd8b6d865e133ab5b8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// GPU-Accelerated Morphology Toolbox
//
// Author: Alan Reiner
// Email: [email protected]
// Date: 03 September, 2010
//
// Description: This header provides a complete set of tools to do any sequence
// of arbitrary morphological operations on a binary mask. The
// implementation is a little non-intuitive, because it uses the
// values {-1, 0, 1} internally to represent the mask, instead of
// the usual {0, 1}. This is so that we can simultaneously represent
// "Don't Care" values in a structuring element, and we use only
// integer multiplications to do our Hit-Or-Miss (HoM) operations.
// Therefore, there is no need for conditionals in the inner loop
// (which are generally very slow on the GPU).
//
// A user of this library does not really need to understand the
// implementation, unless he intends to expand the library and
// write extra morphological operations.
//
// The key to understanding the implementation is in the variable
// SE_NON_ZERO:
//
//
// SE_NON_ZERO:
//
// This variable is a side-effect of the highly efficient morphology ops.
// For a given pixel, when you compare the 3x3, you multiply the SE elements
// to the image elements. For each pixel compared, if the SE element is:
// HIT: result 1
// MISS: result -1
// DONTCARE: result 0
//
// We sum up the results for each pixel in the NxN neighborhood. In a hit-
// or-miss operation, we need all the +1 and -1 to match exactly (each
// pixel returns +1), so SE_NON_ZERO should be the total number of non-zero
// elements. If we are looking for a looser match (dilation, median, etc.), the
// target sum is lowered accordingly. SE_NON_ZERO could be
// calculated on the fly, but it's constant for each SE, and would slow down
// the kernel significantly
//
// EXAMPLE:
//
// STRUCTURING ELEMENT: 0 1 0
// 1 1 -1
// 0 -1 -1
//
// This SE has 6 non-zero elements. Also important is the fact that ALL
// elements must hit in order to "pass", so we pass in 6 for SE_NON_ZERO
//
// IMAGE CHUNK1: 1 1 -1 Dilate Result: 6
// 1 1 -1
// -1 -1 -1
//
//
// IMAGE CHUNK2: -1 1 -1 Dilate Result: 6
// 1 1 -1
// -1 -1 -1
//
//
// IMAGE CHUNK3: -1 -1 1 Dilate Result: -6
// -1 -1 1
// 1 1 1
//
//
// IMAGE CHUNK4: -1 -1 -1 Dilate Result: 2
// 1 1 -1
// 1 1 -1
//
//
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
#ifndef _GPU_MORPHOLOGY_CU_
#define _GPU_MORPHOLOGY_CU_
using namespace std;
#include <stdio.h>
#include <vector>
#define IDX_1D(col, row, stride) ((col * stride) + row)
#define COL_2D(index, stride) (index / stride)
#define ROW_2D(index, stride) (index % stride)
#define ROUNDUP32(integer) ( ((integer-1)/32 + 1) * 32 )
#define SHMEM 8192
#define FLOAT_SZ sizeof(float)
#define INT_SZ sizeof(int)
////////////////////////////////////////////////////////////////////////////////
//
// This macros is defined because EVERY convolution-like function has the same
// variables. Mainly, the pixel identifiers for this thread based on block
// size, and the size of the padded rectangle that each block will work with
//
// ***This is actually the same as the CONVOLVE version
//
////////////////////////////////////////////////////////////////////////////////
#define CREATE_CONVOLUTION_VARIABLES_MORPH(psfColRad, psfRowRad) \
\
const int cornerCol = blockDim.x*blockIdx.x; \
const int cornerRow = blockDim.y*blockIdx.y; \
const int globalCol = cornerCol + threadIdx.x; \
const int globalRow = cornerRow + threadIdx.y; \
const int globalIdx = IDX_1D(globalCol, globalRow, imgRows); \
\
const int localCol = threadIdx.x; \
const int localRow = threadIdx.y; \
const int localIdx = IDX_1D(localCol, localRow, blockDim.y); \
const int localPixels = blockDim.x*blockDim.y; \
\
const int padRectStride = blockDim.y + 2*psfRowRad; \
const int padRectCol = localCol + psfColRad; \
const int padRectRow = localRow + psfRowRad; \
/*const int padRectIdx = IDX_1D(padRectCol, padRectRow, padRectStride); */ \
const int padRectPixels = padRectStride * (blockDim.x + 2*psfColRad); \
\
__shared__ int sharedMem[SHMEM]; \
int* shmPadRect = (int*)sharedMem; \
int* shmOutput = (int*)&shmPadRect[ROUNDUP32(padRectPixels)]; \
int nLoop;
////////////////////////////////////////////////////////////////////////////////
//
// We are using -1 as "OFF" and +1 as "ON" and 0 as "DONTCARE"
// The user is not expected to do this him/herself, and it's easy enough to
// manipulate the data on the way in and out (just don't forget to convert back
// before copying out the result)
//
////////////////////////////////////////////////////////////////////////////////
#define PREPARE_PADDED_RECTANGLE_MORPH(psfColRad, psfRowRad) \
\
nLoop = (padRectPixels/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < padRectPixels) \
{ \
int prCol = COL_2D(prIndex, padRectStride); \
int prRow = ROW_2D(prIndex, padRectStride); \
int glCol = cornerCol + prCol - psfColRad; \
int glRow = cornerRow + prRow - psfRowRad; \
int glIdx = IDX_1D(glCol, glRow, imgRows); \
if(glRow >= 0 && \
glRow < imgRows && \
glCol >= 0 && \
glCol < imgCols) \
shmPadRect[prIndex] = devInPtr[glIdx]*2 - 1; \
else \
shmPadRect[prIndex] = -1; \
} \
}
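// Illustrative note (not part of the original source): the "*2 - 1" above converts the {0,1}
// input mask into the internal {-1,+1} representation, and the kernels convert back with
// "(result + 1) / 2" before writing the output:
//
//   0*2 - 1 = -1 (OFF)      (-1 + 1) / 2 = 0
//   1*2 - 1 = +1 (ON)       ( 1 + 1) / 2 = 1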
////////////////////////////////////////////////////////////////////////////////
//
// Frequently, we want to pull some linear arrays into shared memory (usually
// PSFs) which will be queried often, and we want them close to the threads.
//
// This macro temporarily reassigns all the threads to do the memory copy from
// global memory to shared memory in parallel. Since the array may be bigger
// than the blocksize, some threads may be doing multiple mem copies
//
// ***This is actually the same as the FLOAT version
//
////////////////////////////////////////////////////////////////////////////////
#define COPY_LIN_ARRAY_TO_SHMEM_MORPH(srcPtr, dstPtr, nValues) \
nLoop = (nValues/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < nValues) \
{ \
dstPtr[prIndex] = srcPtr[prIndex]; \
} \
}
////////////////////////////////////////////////////////////////////////////////
//
// This macro creates optimized, unrolled versions of the generic
// morphological operation kernel for 3x3 structuring elements.
//
// Since it has no loops, and only one if-statement per thread, it should be
// extremely fast. The generic kernel is fast too, but slowed down slightly
// by the doubly-nested for-loops.
//
// TODO: We should create 3x1 and 1x3 functions (and possibly Nx1 & 1xN)
// so that we can further optimize morph ops for separable SEs
//
////////////////////////////////////////////////////////////////////////////////
#define CREATE_3X3_MORPH_KERNEL( name, seTargSum, \
a00, a10, a20, \
a01, a11, a21, \
a02, a12, a22) \
__global__ void Morph3x3_##name##_Kernel( \
int* devInPtr, \
int* devOutPtr, \
int imgCols, \
int imgRows) \
{ \
CREATE_CONVOLUTION_VARIABLES_MORPH(1, 1); \
\
PREPARE_PADDED_RECTANGLE_MORPH(1, 1); \
\
shmOutput[localIdx] = -1;\
\
__syncthreads(); \
\
int accum = 0;\
\
accum += a00 * shmPadRect[IDX_1D(padRectCol-1, padRectRow-1, padRectStride)]; \
accum += a01 * shmPadRect[IDX_1D(padRectCol-1, padRectRow , padRectStride)]; \
accum += a02 * shmPadRect[IDX_1D(padRectCol-1, padRectRow+1, padRectStride)]; \
accum += a10 * shmPadRect[IDX_1D(padRectCol , padRectRow-1, padRectStride)]; \
accum += a11 * shmPadRect[IDX_1D(padRectCol , padRectRow , padRectStride)]; \
accum += a12 * shmPadRect[IDX_1D(padRectCol , padRectRow+1, padRectStride)]; \
accum += a20 * shmPadRect[IDX_1D(padRectCol+1, padRectRow-1, padRectStride)]; \
accum += a21 * shmPadRect[IDX_1D(padRectCol+1, padRectRow , padRectStride)]; \
accum += a22 * shmPadRect[IDX_1D(padRectCol+1, padRectRow+1, padRectStride)]; \
\
if(accum >= seTargSum) \
shmOutput[localIdx] = 1; \
\
__syncthreads(); \
\
devOutPtr[globalIdx] = (shmOutput[localIdx] + 1) / 2; \
}
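// Illustrative sketch (not part of the original source): given the target-sum rules described
// further below, the standard 3x3 box kernels declared later in this header would presumably be
// instantiated along these lines (a full 3x3 box SE has seNonZero = 9):
//
//   CREATE_3X3_MORPH_KERNEL( Erode,   9, 1, 1, 1,   // erosion: every non-zero pixel must hit
//                                        1, 1, 1,
//                                        1, 1, 1 )
//   CREATE_3X3_MORPH_KERNEL( Dilate, -8, 1, 1, 1,   // dilation: -seNonZero + 1 = -8
//                                        1, 1, 1,
//                                        1, 1, 1 )
//   CREATE_3X3_MORPH_KERNEL( Median,  0, 1, 1, 1,   // median: more hits than misses
//                                        1, 1, 1,
//                                        1, 1, 1 )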
////////////////////////////////////////////////////////////////////////////////
// This macro simply creates the declarations for the above functions, to be
// used in the header file
////////////////////////////////////////////////////////////////////////////////
#define DECLARE_3X3_MORPH_KERNEL( name ) \
__global__ void Morph3x3_##name##_Kernel( \
int* devInPtr, \
int* devOutPtr, \
int imgCols, \
int imgRows);
////////////////////////////////////////////////////////////////////////////////
// This macro creates member method wrappers for each of the kernels created
// with the CREATE_3X3_MORPH_KERNEL macro.
//
// NOTE: CREATE_3X3_MORPH_KERNEL macro creates KERNEL functions, this macro
// creates member methods in MorphWorkbench that wrap those kernel
// functions. When calling these, you don't need to include the
// <<<GRID,BLOCK>>> as you would with a kernel function
//
////////////////////////////////////////////////////////////////////////////////
#define CREATE_MWB_3X3_FUNCTION( name ) \
void name(void) \
{ \
hipLaunchKernelGGL(( Morph3x3_##name##_Kernel), dim3(GRID_),dim3(BLOCK_), 0, 0, \
*devBufferPtrA_, \
*devBufferPtrB_, \
imageCols_, \
imageRows_); \
flipBuffers(); \
} \
\
void Z##name(int* src, int* dst) \
{ \
hipLaunchKernelGGL(( Morph3x3_##name##_Kernel), dim3(GRID_),dim3(BLOCK_), 0, 0, \
src, \
dst, \
imageCols_, \
imageRows_); \
}
////////////////////////////////////////////////////////////////////////////////
// Standard 3x3 erosions, dilations and median filtering
DECLARE_3X3_MORPH_KERNEL( Dilate )
DECLARE_3X3_MORPH_KERNEL( Erode )
DECLARE_3X3_MORPH_KERNEL( DilateCross )
DECLARE_3X3_MORPH_KERNEL( ErodeCross )
DECLARE_3X3_MORPH_KERNEL( Median )
DECLARE_3X3_MORPH_KERNEL( MedianCross )
////////////////////////////////////////////////////////////////////////////////
// There are 8 standard structuring elements for THINNING
DECLARE_3X3_MORPH_KERNEL( Thin1 );
DECLARE_3X3_MORPH_KERNEL( Thin2 );
DECLARE_3X3_MORPH_KERNEL( Thin3 );
DECLARE_3X3_MORPH_KERNEL( Thin4 );
DECLARE_3X3_MORPH_KERNEL( Thin5 );
DECLARE_3X3_MORPH_KERNEL( Thin6 );
DECLARE_3X3_MORPH_KERNEL( Thin7 );
DECLARE_3X3_MORPH_KERNEL( Thin8 );
////////////////////////////////////////////////////////////////////////////////
// There are 8 standard structuring elements for PRUNING
DECLARE_3X3_MORPH_KERNEL( Prune1 );
DECLARE_3X3_MORPH_KERNEL( Prune2 );
DECLARE_3X3_MORPH_KERNEL( Prune3 );
DECLARE_3X3_MORPH_KERNEL( Prune4 );
DECLARE_3X3_MORPH_KERNEL( Prune5 );
DECLARE_3X3_MORPH_KERNEL( Prune6 );
DECLARE_3X3_MORPH_KERNEL( Prune7 );
DECLARE_3X3_MORPH_KERNEL( Prune8 );
////////////////////////////////////////////////////////////////////////////////
// BASIC UNARY & BINARY *MASK* OPERATORS
//
// Could create LUTs, but I'm not sure the extra implementation complexity
// actually provides much benefit. These ops already run on the order of
// microseconds.
//
// NOTE: These operators are for images with {0,1}, only the MORPHOLOGICAL
// operators will operate with {-1,0,1}
//
////////////////////////////////////////////////////////////////////////////////
__global__ void MaskUnion_Kernel( int* A, int* B, int* devOut);
__global__ void MaskIntersect_Kernel( int* A, int* B, int* devOut);
__global__ void MaskSubtract_Kernel( int* A, int* B, int* devOut);
__global__ void MaskInvert_Kernel( int* A, int* devOut);
__global__ void MaskCopy_Kernel( int* A, int* devOut);
__global__ void MaskCountDiff_Kernel( int* A, int* B, int* globalMemCount);
__global__ void MaskSum_Kernel( int* A, int* globalMemSum);
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// ***Generic Morphologoical Operation Kernel Function***
//
// This is the basis for *ALL* other morpohological operations. Every
// morphological operation in this library can be traced back to this
// (the optimized 3x3 ops are hardcoded/unrolled versions of this function)
//
// For all morph operations, we use {-1, 0, +1} ~ {OFF, DONTCARE, ON}.
// This mapping allows us to use direct integer multiplication and
// summing of SE and image components. Integer multiplication is
// much faster than using lots of if-statements.
//
// Erosion, dilation, median, and a variety of weird and unique
// morphological operations are created solely by adjusting the
// target sum argument (seTargSum).
//
////////////////////////////////////////////////////////////////////////////////
//
// Target Sum Values:
//
// The following describes under what conditions the SE is considered to "hit"
// a chunk of the image, based on how many individual pixels it "hits":
//
//
// Erosion: Hit every non-zero pixel
//
// If we hit every pixel, we get a +1 for every non-zero elt
// Therefore, our target should be [seNonZero]
//
// Dilation: Hit at least one non-zero pixel
//
// If we miss every single pixel: sum == -seNonZero
// If we hit one pixel: sum == -seNonZero+2;
// If we hit two pixels: sum == -seNonZero+4;
// ...
// Therefore, our target should be [-seNonZero+1] or greater
//
//
// Median: More pixels hit than not hit
//
// Since each pixel-hit is a +1, and each pixel-miss is a -1,
// the median is 1 if and only if there are more +1s than -1s.
// Therefore, our target should be [0] or greater
//
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void Morph_Generic_Kernel(
int* devInPtr,
int* devOutPtr,
int imgCols,
int imgRows,
int* sePtr,
int seColRad,
int seRowRad,
int seTargSum);
////////////////////////////////////////////////////////////////////////////////
//
// Structuring Element
//
// Structuring elements (SE) are the Point-Spread Functions (PSF) of image
// morphology. We use {-1, 0, +1} for {OFF, DONTCARE, ON}
//
// NOTE: A structuring element object is directly linked to the device memory
// where the SE data resides. This class allocates the device memory
// on construction and frees it on destruction
//
////////////////////////////////////////////////////////////////////////////////
class StructElt
{
private:
int* devPtr_;
int seCols_;
int seRows_;
int seElts_;
int seBytes_;
int seNonZero_;
public:
void init(int* hostSE, int nc, int nr)
{
int numNonZero = 0;
for(int i=0; i<seElts_; i++)
if(hostSE[i] == -1 || hostSE[i] == 1)
numNonZero++;
init(hostSE, nc, nr, numNonZero);
}
void init(int* hostSE, int nc, int nr, int senz)
{
seCols_ = nc;
seRows_ = nr;
seElts_ = seCols_ * seRows_;
seBytes_ = seElts_ * INT_SZ;
seNonZero_ = senz;
hipMalloc((void**)&devPtr_, seBytes_);
hipMemcpy(devPtr_, hostSE, seBytes_, hipMemcpyHostToDevice);
}
StructElt() :
devPtr_(NULL),
seCols_(-1),
seRows_(-1),
seElts_(-1),
seBytes_(-1),
seNonZero_(0) {}
StructElt(int* hostSE, int nc, int nr) { init(hostSE, nc, nr); }
~StructElt() { hipFree(devPtr_ ); }
int* getDevPtr(void) const {return devPtr_;}
int getCols(void) const {return seCols_;}
int getRows(void) const {return seRows_;}
int getElts(void) const {return seElts_;}
int getBytes(void) const {return seBytes_;}
int getNonZero(void) const {return seNonZero_;}
};
////////////////////////////////////////////////////////////////////////////////
//
// MorphWorkbench
//
// A morphology workbench is used when you have a single image to which you want
// to apply a sequence of dozens, hundreds or thousands of morphology operations.
//
// The workbench copies the input data to the device once at construction,
// and then applies all the operations, only extracting the result from the
// device when "fetchBuffer" is called.
//
// The workbench uses two primary image buffers, which are used to as input and
// output buffers, flipping back and forth every operation. This is so that
// we don't need to keep copying the output back to the input buffer after each
// operation.
//
// There are also on-demand temporary buffers, which may be needed for more
// advanced morphological operations. For instance, the pruning and thinning
// kernels only *locate* pixels that need to be removed. So we have to apply
// the pruning/thinning SEs into a temp buffer, and then subtract that buffer
// from the input. This is why we have devExtraBuffers_.
//
// Static Data:
//
// masterSEList_:
//
// This class keeps a master list of all structuring elements and all
// workbenches. The static list of structuring elements ensures that we
// don't have to keep copying them into device memory every time we want
// to use them, and so that the numNonZero values can be stored and kept
// with them. Otherwise, we would need to recalculate it every time.
//
// masterMwbList_:
//
// Additionally, we keep a running list of pointers to every MorphWorkbench
// ever created (set to null when destructor is called). The only real
// benefit of this is so that we can query how much device memory we are
// using at any given time. See the method, calculateDeviceMemUsage();
//
////////////////////////////////////////////////////////////////////////////////
class MorphWorkbench
{
private:
// The locations of device memory that contain all of our stuff
int* devBuffer1_;
int* devBuffer2_;
vector<int*> devExtraBuffers_;
// We want to keep track of every MWB and structuring element created
// so we can calculate the total memory usage of all workbenches, which
// would include all buffers and SEs
static vector<MorphWorkbench*> masterMwbList_;
static vector<StructElt> masterSEList_;
// This workbench should know where it is in the master MWB list
int mwbID_;
// These two pointers will switch after every operation
int** devBufferPtrA_;
int** devBufferPtrB_;
// Keep pointers to the host memory, so we know where to get input
// and where to put the result
int* hostImageIn_;
bool imageCopied_;
// All buffers in a workbench are the same size: the size of the image
unsigned int imageCols_;
unsigned int imageRows_;
unsigned int imagePixels_;
unsigned int imageBytes_;
// All kernel functions will be called with the same geometry
dim3 GRID_;
dim3 BLOCK_;
// We need temp buffers for operations like thinning, pruning
void createExtraBuffer(void);
void deleteExtraBuffer(void);
int* getExtraBufferPtr(int bufIdx);
// This gets called after every operation to switch Input/Output buffers ptrs
void flipBuffers(void);
public:
dim3 getGridSize(void) const {return GRID_;}
dim3 getBlockSize(void) const {return BLOCK_;}
void setBlockSize(dim3 newSize);
// Calculate the device mem used by all MWBs and SEs
static int calculateDeviceMemUsage(bool printToStdout=true);
// Forking is really just the same as copying
// TODO: not implemented yet
void forkWorkbench(MorphWorkbench & mwb) const;
static int addStructElt(int* hostSE, int ncols, int nrows);
// Default Constructor
MorphWorkbench();
// Constructor
MorphWorkbench(int* imageStart, int cols, int rows, bool COPY=false);
// Copy host data to device, and prepare kernel parameters
void Initialize(int* imageStart, int cols, int rows, bool COPY=false);
// Destructor
~MorphWorkbench();
// Copy the current state of the buffer to the host
void fetchResult(int* hostTarget) const;
// The basic morphological operations (CPU wrappers for GPU kernels)
// NOTE: all batch functions, such as open, close, thinsweep, etc
// are written so that when the user calls them, buffers A and B are
// distinctly before-and-after versions of the operation. The
// alternative is that A and B only contain the states before and
// after the last SUB-operation, and then the user has no clean
// way to determine if the image changed
void GenericMorphOp(int seIndex, int targSum);
void HitOrMiss(int seIndex);
void Erode(int seIndex);
void Dilate(int seIndex);
void Median(int seIndex);
void Open(int seIndex);
void Close(int seIndex);
void FindAndRemove(int seIndex);
// CPU wrappers for the mask op kernel functions which we need frequently
void Union(int* mask2);
void Intersect(int* mask2);
void Subtract(int* mask2);
void Invert(void);
//int NumPixelsChanged(void);
//int SumMask(void);
void CopyBuffer(int* dst);
static void CopyBuffer(int* src, int* dst, int bytes);
/////////////////////////////////////////////////////////////////////////////
// Thinning is a sequence of 8 hit-or-miss operations which each find
// pixels contributing to the blob width, and then removes them from
// the original image. Very similar to skeletonization
void ThinningSweep(void);
/////////////////////////////////////////////////////////////////////////////
// Pruning uses a sequence of 8 hit-or-miss operations to remove "loose ends"
// from a thinned/skeletonized image.
void PruningSweep(void);
// The macro calls below create wrappers for the optimized 3x3 kernel fns
//
// void NAME(void)
// {
// Morph3x3_NAME_Kernel<<<GRID,BLOCK>>>(&devBufA, &devBufB, ...);
// flipBuffers();
// }
// void ZNAME(int* src, int* dst)
// {
// Morph3x3_NAME_Kernel<<<GRID,BLOCK>>>(src, dst, ...);
// }
//
CREATE_MWB_3X3_FUNCTION( Dilate );
CREATE_MWB_3X3_FUNCTION( DilateCross );
CREATE_MWB_3X3_FUNCTION( Erode );
CREATE_MWB_3X3_FUNCTION( ErodeCross );
CREATE_MWB_3X3_FUNCTION( Median );
CREATE_MWB_3X3_FUNCTION( MedianCross );
CREATE_MWB_3X3_FUNCTION( Thin1 );
CREATE_MWB_3X3_FUNCTION( Thin2 );
CREATE_MWB_3X3_FUNCTION( Thin3 );
CREATE_MWB_3X3_FUNCTION( Thin4 );
CREATE_MWB_3X3_FUNCTION( Thin5 );
CREATE_MWB_3X3_FUNCTION( Thin6 );
CREATE_MWB_3X3_FUNCTION( Thin7 );
CREATE_MWB_3X3_FUNCTION( Thin8 );
CREATE_MWB_3X3_FUNCTION( Prune1 );
CREATE_MWB_3X3_FUNCTION( Prune2 );
CREATE_MWB_3X3_FUNCTION( Prune3 );
CREATE_MWB_3X3_FUNCTION( Prune4 );
CREATE_MWB_3X3_FUNCTION( Prune5 );
CREATE_MWB_3X3_FUNCTION( Prune6 );
CREATE_MWB_3X3_FUNCTION( Prune7 );
CREATE_MWB_3X3_FUNCTION( Prune8 );
private:
// These operations are the same as above, but with custom src-dst
// and they don't flip the buffers. These are "unsafe" for the
// user to use, since he can destroy the current buffer, but the
// developer can use them in MWB to ensure that batch operations
// leave buffers A and B in a compare-able state
void ZGenericMorphOp(int seIndex, int targSum, int* src, int* dst);
void ZHitOrMiss(int seIndex, int* src, int* dst);
void ZErode(int seIndex, int* src, int* dst);
void ZDilate(int seIndex, int* src, int* dst);
void ZMedian(int seIndex, int* src, int* dst);
void ZOpen(int seIndex, int* src, int* dst, int useTempBuf=0);
void ZClose(int seIndex, int* src, int* dst, int useTempBuf=0);
void ZFindAndRemove(int seIndex, int* src, int* dst, int useTempBuf=0);
// CPU wrappers for the mask op kernel functions which we need frequently
void ZUnion(int* mask2, int* src, int* dst);
void ZIntersect(int* mask2, int* src, int* dst);
void ZSubtract(int* mask2, int* src, int* dst);
void ZInvert(int* src, int* dst);
};
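// Illustrative usage sketch (not part of the original source), assuming hostMask is a
// cols-by-rows binary mask already stored as {0,1} ints on the host:
//
//   int cross[9] = { 0, 1, 0,
//                    1, 1, 1,
//                    0, 1, 0 };                       // 3x3 cross SE; 0 = don't care
//   int seIdx = MorphWorkbench::addStructElt(cross, 3, 3);
//   MorphWorkbench mwb(hostMask, cols, rows, true);
//   mwb.Erode(seIdx);                                 // generic SE-driven erosion
//   mwb.Dilate();                                     // optimized, unrolled 3x3 box dilation
//   mwb.fetchResult(hostMask);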
#endif
| de5116724198c75103414fd8b6d865e133ab5b8b.cu | ////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// GPU-Accelerated Morphology Toolbox
//
// Author: Alan Reiner
// Email: [email protected]
// Date: 03 September, 2010
//
// Description: This header provides a complete set of tools to do any sequence
// of arbitrary morphological operations on a binary mask. The
// implementation is a little non-intuitive, because it uses the
// values {-1, 0, 1} internally to represent the mask, instead of
// the usual {0, 1}. This is so that we can simultaneously represent
// "Don't Care" values in a structuring element, and we use only
// integer multiplications to do our Hit-Or-Miss (HoM) operations.
// Therefore, there is no need for conditionals in the inner loop
// (which are generally very slow on the GPU).
//
// A user of this library does not really need to understand the
// implementation, unless he intends to expand the library and
// write extra morphological operations.
//
// The key to understanding the implementation is in the variable
// SE_NON_ZERO:
//
//
// SE_NON_ZERO:
//
// This variable is a side-effect of the highly efficient morphology ops.
// For a given pixel, when you compare the 3x3, you multiply the SE elements
// to the image elements. For each pixel compared, if the SE element is:
// HIT: result 1
// MISS: result -1
// DONTCARE: result 0
//
// We sum up the results for each pixel in the NxN neighborhood. In a hit-
// or-miss operation, we need all the +1 and -1 to match exactly (each
// pixel returns +1), so SE_NON_ZERO should be the total number of non-zero
// elements. If we are looking for a looser match (dilation, median, etc.), the
// target sum is lowered accordingly. SE_NON_ZERO could be
// calculated on the fly, but it's constant for each SE, and would slow down
// the kernel significantly
//
// EXAMPLE:
//
// STRUCTURING ELEMENT: 0 1 0
// 1 1 -1
// 0 -1 -1
//
// This SE has 6 non-zero elements. Also important is the fact that ALL
// elements must hit in order to "pass", so we pass in 6 for SE_NON_ZERO
//
// IMAGE CHUNK1: 1 1 -1 Dilate Result: 6
// 1 1 -1
// -1 -1 -1
//
//
// IMAGE CHUNK2: -1 1 -1 Dilate Result: 6
// 1 1 -1
// -1 -1 -1
//
//
// IMAGE CHUNK3: -1 -1 1 Dilate Result: -6
// -1 -1 1
// 1 1 1
//
//
// IMAGE CHUNK4: -1 -1 -1 Dilate Result: 2
// 1 1 -1
// 1 1 -1
//
//
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
#ifndef _GPU_MORPHOLOGY_CU_
#define _GPU_MORPHOLOGY_CU_
using namespace std;
#include <stdio.h>
#include <vector>
#define IDX_1D(col, row, stride) ((col * stride) + row)
#define COL_2D(index, stride) (index / stride)
#define ROW_2D(index, stride) (index % stride)
#define ROUNDUP32(integer) ( ((integer-1)/32 + 1) * 32 )
#define SHMEM 8192
#define FLOAT_SZ sizeof(float)
#define INT_SZ sizeof(int)
////////////////////////////////////////////////////////////////////////////////
//
// This macro is defined because EVERY convolution-like function has the same
// variables. Mainly, the pixel identifiers for this thread based on block
// size, and the size of the padded rectangle that each block will work with
//
// ***This is actually the same as the CONVOLVE version
//
////////////////////////////////////////////////////////////////////////////////
#define CREATE_CONVOLUTION_VARIABLES_MORPH(psfColRad, psfRowRad) \
\
const int cornerCol = blockDim.x*blockIdx.x; \
const int cornerRow = blockDim.y*blockIdx.y; \
const int globalCol = cornerCol + threadIdx.x; \
const int globalRow = cornerRow + threadIdx.y; \
const int globalIdx = IDX_1D(globalCol, globalRow, imgRows); \
\
const int localCol = threadIdx.x; \
const int localRow = threadIdx.y; \
const int localIdx = IDX_1D(localCol, localRow, blockDim.y); \
const int localPixels = blockDim.x*blockDim.y; \
\
const int padRectStride = blockDim.y + 2*psfRowRad; \
const int padRectCol = localCol + psfColRad; \
const int padRectRow = localRow + psfRowRad; \
/*const int padRectIdx = IDX_1D(padRectCol, padRectRow, padRectStride); */ \
const int padRectPixels = padRectStride * (blockDim.x + 2*psfColRad); \
\
__shared__ int sharedMem[SHMEM]; \
int* shmPadRect = (int*)sharedMem; \
int* shmOutput = (int*)&shmPadRect[ROUNDUP32(padRectPixels)]; \
int nLoop;
////////////////////////////////////////////////////////////////////////////////
//
// We are using -1 as "OFF" and +1 as "ON" and 0 as "DONTCARE"
// The user is not expected to do this him/herself, and it's easy enough to
// manipulate the data on the way in and out (just don't forget to convert back
// before copying out the result)
//
////////////////////////////////////////////////////////////////////////////////
#define PREPARE_PADDED_RECTANGLE_MORPH(psfColRad, psfRowRad) \
\
nLoop = (padRectPixels/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < padRectPixels) \
{ \
int prCol = COL_2D(prIndex, padRectStride); \
int prRow = ROW_2D(prIndex, padRectStride); \
int glCol = cornerCol + prCol - psfColRad; \
int glRow = cornerRow + prRow - psfRowRad; \
int glIdx = IDX_1D(glCol, glRow, imgRows); \
if(glRow >= 0 && \
glRow < imgRows && \
glCol >= 0 && \
glCol < imgCols) \
shmPadRect[prIndex] = devInPtr[glIdx]*2 - 1; \
else \
shmPadRect[prIndex] = -1; \
} \
}
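////////////////////////////////////////////////////////////////////////////////
// Hedged sketch (helper names are illustrative, not part of the original
// code): the {0,1} <-> {-1,+1} conversion used above, and again in the
// kernels' final write-out, expressed as host-side one-liners.
static inline int maskToPlusMinus(int v) { return v*2 - 1;   }  //  0 -> -1, 1 -> +1
static inline int plusMinusToMask(int v) { return (v + 1)/2; }  // -1 ->  0, +1 -> 1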
////////////////////////////////////////////////////////////////////////////////
//
// Frequently, we want to pull some linear arrays into shared memory (usually
// PSFs) which will be queried often, and we want them close to the threads.
//
// This macro temporarily reassigns all the threads to do the memory copy from
// global memory to shared memory in parallel. Since the array may be bigger
// than the blocksize, some threads may be doing multiple mem copies
//
// ***This is actually the same as the FLOAT version
//
////////////////////////////////////////////////////////////////////////////////
#define COPY_LIN_ARRAY_TO_SHMEM_MORPH(srcPtr, dstPtr, nValues) \
nLoop = (nValues/localPixels)+1; \
for(int loopIdx=0; loopIdx<nLoop; loopIdx++) \
{ \
int prIndex = loopIdx*localPixels + localIdx; \
if(prIndex < nValues) \
{ \
dstPtr[prIndex] = srcPtr[prIndex]; \
} \
}
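////////////////////////////////////////////////////////////////////////////////
// Typical use (hedged sketch -- the exact shared-memory layout inside the
// generic kernel is not shown in this header): staging the SE into shared
// memory before the convolution loop, followed by a barrier:
//
//    COPY_LIN_ARRAY_TO_SHMEM_MORPH(sePtr, shmSE, seCols*seRows);
//    __syncthreads();
////////////////////////////////////////////////////////////////////////////////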
////////////////////////////////////////////////////////////////////////////////
//
// This macro creates optimized, unrolled versions of the generic
// morphological operation kernel for 3x3 structuring elements.
//
// Since it has no loops, and only one if-statement per thread, it should be
// extremely fast.  The generic kernel is fast too, but slowed down slightly
// by the doubly-nested for-loops.
//
// TODO: We should create 3x1 and 1x3 functions (and possibly Nx1 & 1xN)
// so that we can further optimize morph ops for separable SEs
//
////////////////////////////////////////////////////////////////////////////////
#define CREATE_3X3_MORPH_KERNEL( name, seTargSum, \
a00, a10, a20, \
a01, a11, a21, \
a02, a12, a22) \
__global__ void Morph3x3_##name##_Kernel( \
int* devInPtr, \
int* devOutPtr, \
int imgCols, \
int imgRows) \
{ \
CREATE_CONVOLUTION_VARIABLES_MORPH(1, 1); \
\
PREPARE_PADDED_RECTANGLE_MORPH(1, 1); \
\
shmOutput[localIdx] = -1;\
\
__syncthreads(); \
\
int accum = 0;\
\
accum += a00 * shmPadRect[IDX_1D(padRectCol-1, padRectRow-1, padRectStride)]; \
accum += a01 * shmPadRect[IDX_1D(padRectCol-1, padRectRow , padRectStride)]; \
accum += a02 * shmPadRect[IDX_1D(padRectCol-1, padRectRow+1, padRectStride)]; \
accum += a10 * shmPadRect[IDX_1D(padRectCol , padRectRow-1, padRectStride)]; \
accum += a11 * shmPadRect[IDX_1D(padRectCol , padRectRow , padRectStride)]; \
accum += a12 * shmPadRect[IDX_1D(padRectCol , padRectRow+1, padRectStride)]; \
accum += a20 * shmPadRect[IDX_1D(padRectCol+1, padRectRow-1, padRectStride)]; \
accum += a21 * shmPadRect[IDX_1D(padRectCol+1, padRectRow , padRectStride)]; \
accum += a22 * shmPadRect[IDX_1D(padRectCol+1, padRectRow+1, padRectStride)]; \
\
if(accum >= seTargSum) \
shmOutput[localIdx] = 1; \
\
__syncthreads(); \
\
devOutPtr[globalIdx] = (shmOutput[localIdx] + 1) / 2; \
}
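////////////////////////////////////////////////////////////////////////////////
// Hypothetical example (the real instantiations live in the accompanying .cu
// file): a 3x3 box erosion must hit all 9 SE pixels, so seTargSum == 9, while
// a 3x3 box dilation only needs one hit, so seTargSum == -9 + 1 == -8 (see the
// "Target Sum Values" discussion further below).
//
//    CREATE_3X3_MORPH_KERNEL( ErodeBoxExample, 9,
//                             1, 1, 1,
//                             1, 1, 1,
//                             1, 1, 1 )
//
//    CREATE_3X3_MORPH_KERNEL( DilateBoxExample, -8,
//                             1, 1, 1,
//                             1, 1, 1,
//                             1, 1, 1 )
////////////////////////////////////////////////////////////////////////////////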
////////////////////////////////////////////////////////////////////////////////
// This macro simply creates the declarations for the above functions, to be
// used in the header file
////////////////////////////////////////////////////////////////////////////////
#define DECLARE_3X3_MORPH_KERNEL( name ) \
__global__ void Morph3x3_##name##_Kernel( \
int* devInPtr, \
int* devOutPtr, \
int imgCols, \
int imgRows);
////////////////////////////////////////////////////////////////////////////////
// This macro creates member method wrappers for each of the kernels created
// with the CREATE_3X3_MORPH_KERNEL macro.
//
// NOTE: CREATE_3X3_MORPH_KERNEL macro creates KERNEL functions, this macro
// creates member methods in MorphWorkbench that wrap those kernel
// functions. When calling these, you don't need to include the
// <<<GRID,BLOCK>>> as you would with a kernel function
//
////////////////////////////////////////////////////////////////////////////////
#define CREATE_MWB_3X3_FUNCTION( name ) \
void name(void) \
{ \
Morph3x3_##name##_Kernel<<<GRID_,BLOCK_>>>( \
*devBufferPtrA_, \
*devBufferPtrB_, \
imageCols_, \
imageRows_); \
flipBuffers(); \
} \
\
void Z##name(int* src, int* dst) \
{ \
Morph3x3_##name##_Kernel<<<GRID_,BLOCK_>>>( \
src, \
dst, \
imageCols_, \
imageRows_); \
}
////////////////////////////////////////////////////////////////////////////////
// Standard 3x3 erosions, dilations and median filtering
DECLARE_3X3_MORPH_KERNEL( Dilate )
DECLARE_3X3_MORPH_KERNEL( Erode )
DECLARE_3X3_MORPH_KERNEL( DilateCross )
DECLARE_3X3_MORPH_KERNEL( ErodeCross )
DECLARE_3X3_MORPH_KERNEL( Median )
DECLARE_3X3_MORPH_KERNEL( MedianCross )
////////////////////////////////////////////////////////////////////////////////
// There are 8 standard structuring elements for THINNING
DECLARE_3X3_MORPH_KERNEL( Thin1 );
DECLARE_3X3_MORPH_KERNEL( Thin2 );
DECLARE_3X3_MORPH_KERNEL( Thin3 );
DECLARE_3X3_MORPH_KERNEL( Thin4 );
DECLARE_3X3_MORPH_KERNEL( Thin5 );
DECLARE_3X3_MORPH_KERNEL( Thin6 );
DECLARE_3X3_MORPH_KERNEL( Thin7 );
DECLARE_3X3_MORPH_KERNEL( Thin8 );
////////////////////////////////////////////////////////////////////////////////
// There are 8 standard structuring elements for PRUNING
DECLARE_3X3_MORPH_KERNEL( Prune1 );
DECLARE_3X3_MORPH_KERNEL( Prune2 );
DECLARE_3X3_MORPH_KERNEL( Prune3 );
DECLARE_3X3_MORPH_KERNEL( Prune4 );
DECLARE_3X3_MORPH_KERNEL( Prune5 );
DECLARE_3X3_MORPH_KERNEL( Prune6 );
DECLARE_3X3_MORPH_KERNEL( Prune7 );
DECLARE_3X3_MORPH_KERNEL( Prune8 );
////////////////////////////////////////////////////////////////////////////////
// BASIC UNARY & BINARY *MASK* OPERATORS
//
// Could create LUTs, but I'm not sure the extra implementation complexity
// actually provides much benefit. These ops already run on the order of
// microseconds.
//
// NOTE: These operators are for images with {0,1}, only the MORPHOLOGICAL
// operators will operate with {-1,0,1}
//
////////////////////////////////////////////////////////////////////////////////
__global__ void MaskUnion_Kernel( int* A, int* B, int* devOut);
__global__ void MaskIntersect_Kernel( int* A, int* B, int* devOut);
__global__ void MaskSubtract_Kernel( int* A, int* B, int* devOut);
__global__ void MaskInvert_Kernel( int* A, int* devOut);
__global__ void MaskCopy_Kernel( int* A, int* devOut);
__global__ void MaskCountDiff_Kernel( int* A, int* B, int* globalMemCount);
__global__ void MaskSum_Kernel( int* A, int* globalMemSum);
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// ***Generic Morphological Operation Kernel Function***
//
// This is the basis for *ALL* other morphological operations.  Every
// morphological operation in this library can be traced back to this
// (the optimized 3x3 ops are hardcoded/unrolled versions of this function)
//
// For all morph operations, we use {-1, 0, +1} ~ {OFF, DONTCARE, ON}.
// This mapping allows us to use direct integer multiplication and
// summing of SE and image components. Integer multiplication is
// much faster than using lots of if-statements.
//
// Erosion, dilation, median, and a variety of weird and unique
// morphological operations are created solely by adjusting the
// target sum argument (seTargSum).
//
////////////////////////////////////////////////////////////////////////////////
//
// Target Sum Values:
//
// The following describes under what conditions the SE is considered to "hit"
// a chunk of the image, based on how many individual pixels it "hits":
//
//
// Erosion: Hit every non-zero pixel
//
// If we hit every pixel, we get a +1 for every non-zero elt
// Therefore, our target should be [seNonZero]
//
// Dilation: Hit at least one non-zero pixel
//
// If we miss every single pixel: sum == -seNonZero
// If we hit one pixel: sum == -seNonZero+2;
// If we hit two pixels: sum == -seNonZero+4;
// ...
// Therefore, our target should be [-seNonZero+1] or greater
//
//
// Median: More pixels hit than not hit
//
// Since each pixel-hit is a +1, and each pixel-miss is a -1,
// the median is 1 if and only if there are more +1s than -1s.
// Therefore, our target should be [0] or greater
//
//
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
__global__ void Morph_Generic_Kernel(
int* devInPtr,
int* devOutPtr,
int imgCols,
int imgRows,
int* sePtr,
int seColRad,
int seRowRad,
int seTargSum);
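////////////////////////////////////////////////////////////////////////////////
// Hedged sketch (helper names are illustrative, not part of the original
// interface): the target-sum rules described above reduce to one-liners.
// Given the SE's non-zero count, these return the seTargSum to pass to the
// generic kernel declared above.
static inline int erodeTargSum (int seNonZero) { return  seNonZero;     } // hit every SE pixel
static inline int dilateTargSum(int seNonZero) { return -seNonZero + 1; } // hit at least one
static inline int medianTargSum(int /*seNonZero*/) { return 0;          } // more hits than misses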
////////////////////////////////////////////////////////////////////////////////
//
// Structuring Element
//
// Structuring elements (SE) are the Point-Spread Functions (PSF) of image
// morphology. We use {-1, 0, +1} for {OFF, DONTCARE, ON}
//
// NOTE: A structuring element object is directly linked to the device memory
// where the SE data resides. This class allocates the device memory
// on construction and frees it on destruction
//
////////////////////////////////////////////////////////////////////////////////
class StructElt
{
private:
int* devPtr_;
int seCols_;
int seRows_;
int seElts_;
int seBytes_;
int seNonZero_;
public:
void init(int* hostSE, int nc, int nr)
{
// Count the non-zero (+1/-1) entries.  seElts_ is not set yet at this
// point, so the element count must come from the arguments.
int numNonZero = 0;
for(int i=0; i<nc*nr; i++)
if(hostSE[i] == -1 || hostSE[i] == 1)
numNonZero++;
init(hostSE, nc, nr, numNonZero);
}
void init(int* hostSE, int nc, int nr, int senz)
{
seCols_ = nc;
seRows_ = nr;
seElts_ = seCols_ * seRows_;
seBytes_ = seElts_ * INT_SZ;
seNonZero_ = senz;
cudaMalloc((void**)&devPtr_, seBytes_);
cudaMemcpy(devPtr_, hostSE, seBytes_, cudaMemcpyHostToDevice);
}
StructElt() :
devPtr_(NULL),
seCols_(-1),
seRows_(-1),
seElts_(-1),
seBytes_(-1),
seNonZero_(0) {}
StructElt(int* hostSE, int nc, int nr) { init(hostSE, nc, nr); }
~StructElt() { cudaFree(devPtr_ ); }
int* getDevPtr(void) const {return devPtr_;}
int getCols(void) const {return seCols_;}
int getRows(void) const {return seRows_;}
int getElts(void) const {return seElts_;}
int getBytes(void) const {return seBytes_;}
int getNonZero(void) const {return seNonZero_;}
};
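////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative, not part of the original code): building a 3x3
// "cross" SE whose corners are DONTCARE.  init() counts the non-zero entries,
// which is also the erosion target sum.
//
//    int crossSE[9] = { 0, 1, 0,
//                       1, 1, 1,
//                       0, 1, 0 };
//    StructElt se(crossSE, 3, 3);   // copies the SE into device memory
//    int targ = se.getNonZero();    // == 5
////////////////////////////////////////////////////////////////////////////////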
////////////////////////////////////////////////////////////////////////////////
//
// MorphWorkbench
//
// A morphology workbench is used when you have a single image to which you want
// to apply a sequence of dozens, hundreds or thousands of morphology operations.
//
// The workbench copies the input data to the device once at construction,
// and then applies all the operations, only extracting the result from the
// device when "fetchBuffer" is called.
//
// The workbench uses two primary image buffers, which are used as input and
// output buffers, flipping back and forth every operation. This is so that
// we don't need to keep copying the output back to the input buffer after each
// operation.
//
// There are also on-demand temporary buffers, which may be needed for more
// advanced morphological operations. For instance, the pruning and thinning
// kernels only *locate* pixels that need to be removed. So we have to apply
// the pruning/thinning SEs into a temp buffer, and then subtract that buffer
// from the input. This is why we have devExtraBuffers_.
//
// Static Data:
//
// masterSEList_:
//
// This class keeps a master list of all structuring elements and all
// workbenches. The static list of structuring elements ensures that we
// don't have to keep copying them into device memory every time we want
// to use them, and so that the numNonZero values can be stored and kept
// with them. Otherwise, we would need to recalculate it every time.
//
// masterMwbList_:
//
// Additionally, we keep a running list of pointers to every MorphWorkbench
// ever created (set to null when destructor is called). The only real
// benefit of this is so that we can query how much device memory we are
// using at any given time. See the method, calculateDeviceMemUsage();
//
////////////////////////////////////////////////////////////////////////////////
class MorphWorkbench
{
private:
// The locations of device memory that contain all of our stuff
int* devBuffer1_;
int* devBuffer2_;
vector<int*> devExtraBuffers_;
// We want to keep track of every MWB and structuring element created
// so we can calculate the total memory usage of all workbenches, which
// would include all buffers and SEs
static vector<MorphWorkbench*> masterMwbList_;
static vector<StructElt> masterSEList_;
// This workbench should know where it is in the master MWB list
int mwbID_;
// These two pointers will switch after every operation
int** devBufferPtrA_;
int** devBufferPtrB_;
// Keep pointers to the host memory, so we know where to get input
// and where to put the result
int* hostImageIn_;
bool imageCopied_;
// All buffers in a workbench are the same size: the size of the image
unsigned int imageCols_;
unsigned int imageRows_;
unsigned int imagePixels_;
unsigned int imageBytes_;
// All kernel functions will be called with the same geometry
dim3 GRID_;
dim3 BLOCK_;
// We need temp buffers for operations like thinning, pruning
void createExtraBuffer(void);
void deleteExtraBuffer(void);
int* getExtraBufferPtr(int bufIdx);
// This gets called after every operation to switch Input/Output buffers ptrs
void flipBuffers(void);
public:
dim3 getGridSize(void) const {return GRID_;}
dim3 getBlockSize(void) const {return BLOCK_;}
void setBlockSize(dim3 newSize);
// Calculate the device mem used by all MWBs and SEs
static int calculateDeviceMemUsage(bool printToStdout=true);
// Forking is really just the same as copying
// TODO: not implemented yet
void forkWorkbench(MorphWorkbench & mwb) const;
static int addStructElt(int* hostSE, int ncols, int nrows);
// Default Constructor
MorphWorkbench();
// Constructor
MorphWorkbench(int* imageStart, int cols, int rows, bool COPY=false);
// Copy host data to device, and prepare kernel parameters
void Initialize(int* imageStart, int cols, int rows, bool COPY=false);
// Destructor
~MorphWorkbench();
// Copy the current state of the buffer to the host
void fetchResult(int* hostTarget) const;
// The basic morphological operations (CPU wrappers for GPU kernels)
// NOTE: all batch functions, such as open, close, thinsweep, etc
// are written so that when the user calls them, buffers A and B are
// distinctly before-and-after versions of the operation. The
// alternative is that A and B only contain the states before and
// after the last SUB-operation, and then the user has no clean
// way to determine if the image changed
void GenericMorphOp(int seIndex, int targSum);
void HitOrMiss(int seIndex);
void Erode(int seIndex);
void Dilate(int seIndex);
void Median(int seIndex);
void Open(int seIndex);
void Close(int seIndex);
void FindAndRemove(int seIndex);
// CPU wrappers for the mask op kernel functions which we need frequently
void Union(int* mask2);
void Intersect(int* mask2);
void Subtract(int* mask2);
void Invert(void);
//int NumPixelsChanged(void);
//int SumMask(void);
void CopyBuffer(int* dst);
static void CopyBuffer(int* src, int* dst, int bytes);
/////////////////////////////////////////////////////////////////////////////
// Thinning is a sequence of 8 hit-or-miss operations which each find
// pixels contributing to the blob width, and then removes them from
// the original image. Very similar to skeletonization
void ThinningSweep(void);
/////////////////////////////////////////////////////////////////////////////
// Pruning uses a sequence of 8 hit-or-miss operations to remove "loose ends"
// from a thinned/skeletonized image.
void PruningSweep(void);
// The macro calls below create wrappers for the optimized 3x3 kernel fns
//
// void NAME(void)
// {
//       Morph3x3_NAME_Kernel<<<GRID_,BLOCK_>>>(*devBufferPtrA_, *devBufferPtrB_, ...);
// flipBuffers();
// }
// void ZNAME(int* src, int* dst)
// {
//       Morph3x3_NAME_Kernel<<<GRID_,BLOCK_>>>(src, dst, ...);
// }
//
CREATE_MWB_3X3_FUNCTION( Dilate );
CREATE_MWB_3X3_FUNCTION( DilateCross );
CREATE_MWB_3X3_FUNCTION( Erode );
CREATE_MWB_3X3_FUNCTION( ErodeCross );
CREATE_MWB_3X3_FUNCTION( Median );
CREATE_MWB_3X3_FUNCTION( MedianCross );
CREATE_MWB_3X3_FUNCTION( Thin1 );
CREATE_MWB_3X3_FUNCTION( Thin2 );
CREATE_MWB_3X3_FUNCTION( Thin3 );
CREATE_MWB_3X3_FUNCTION( Thin4 );
CREATE_MWB_3X3_FUNCTION( Thin5 );
CREATE_MWB_3X3_FUNCTION( Thin6 );
CREATE_MWB_3X3_FUNCTION( Thin7 );
CREATE_MWB_3X3_FUNCTION( Thin8 );
CREATE_MWB_3X3_FUNCTION( Prune1 );
CREATE_MWB_3X3_FUNCTION( Prune2 );
CREATE_MWB_3X3_FUNCTION( Prune3 );
CREATE_MWB_3X3_FUNCTION( Prune4 );
CREATE_MWB_3X3_FUNCTION( Prune5 );
CREATE_MWB_3X3_FUNCTION( Prune6 );
CREATE_MWB_3X3_FUNCTION( Prune7 );
CREATE_MWB_3X3_FUNCTION( Prune8 );
private:
// These operations are the same as above, but with custom src-dst
// and they don't flip the buffers. These are "unsafe" for the
// user to use, since he can destroy the current buffer, but the
// developer can use them in MWB to ensure that batch operations
// leave buffers A and B in a compare-able state
void ZGenericMorphOp(int seIndex, int targSum, int* src, int* dst);
void ZHitOrMiss(int seIndex, int* src, int* dst);
void ZErode(int seIndex, int* src, int* dst);
void ZDilate(int seIndex, int* src, int* dst);
void ZMedian(int seIndex, int* src, int* dst);
void ZOpen(int seIndex, int* src, int* dst, int useTempBuf=0);
void ZClose(int seIndex, int* src, int* dst, int useTempBuf=0);
void ZFindAndRemove(int seIndex, int* src, int* dst, int useTempBuf=0);
// CPU wrappers for the mask op kernel functions which we need frequently
void ZUnion(int* mask2, int* src, int* dst);
void ZIntersect(int* mask2, int* src, int* dst);
void ZSubtract(int* mask2, int* src, int* dst);
void ZInvert(int* src, int* dst);
};
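////////////////////////////////////////////////////////////////////////////////
// End-to-end usage sketch (illustrative; the variable names here are
// hypothetical, not part of the library):
//
//    std::vector<int> mask(cols*rows);          // binary {0,1} image on host
//    MorphWorkbench wb(&mask[0], cols, rows);   // copies the image to device
//    wb.Dilate();                               // optimized 3x3 box dilation
//    wb.Erode();                                //   ...together, a "close"
//    wb.ThinningSweep();                        // 8 thinning hit-or-miss ops
//    wb.fetchResult(&mask[0]);                  // copy {0,1} result back to host
////////////////////////////////////////////////////////////////////////////////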
#endif