hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars)
---|---|---|---|
f84b757b0a8876dfbf0f36247049e978e37a686f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "setTargetIndexNormalize.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double sum = 1;
double *w = NULL;
hipMalloc(&w, XSIZE*YSIZE*sizeof(double));
double *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(double));
double *output = NULL;
hipMalloc(&output, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((setTargetIndexNormalize), dim3(gridBlock), dim3(threadBlock), 0, 0, n, sum, w, out, output);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((setTargetIndexNormalize), dim3(gridBlock), dim3(threadBlock), 0, 0, n, sum, w, out, output);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((setTargetIndexNormalize), dim3(gridBlock), dim3(threadBlock), 0, 0, n, sum, w, out, output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
f84b757b0a8876dfbf0f36247049e978e37a686f.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "setTargetIndexNormalize.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double sum = 1;
double *w = NULL;
cudaMalloc(&w, XSIZE*YSIZE*sizeof(double));
double *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(double));
double *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
setTargetIndexNormalize<<<gridBlock,threadBlock>>>(n,sum,w,out,output);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
setTargetIndexNormalize<<<gridBlock,threadBlock>>>(n,sum,w,out,output);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
setTargetIndexNormalize<<<gridBlock,threadBlock>>>(n,sum,w,out,output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
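The harness in this row sizes its launch grid by bumping each dimension up to the next multiple of the block size and then dividing. A minimal sketch, assuming illustrative values from `matrices_` and `blocks_` and a hypothetical `divUp` helper, showing that the rounding loop is equivalent to the usual ceiling-division idiom:

```cuda
// Hypothetical comparison, not part of the dataset row above.
#include <cstdio>

// Ceiling division: smallest grid count whose blocks cover n elements.
static unsigned divUp(unsigned n, unsigned block) { return (n + block - 1) / block; }

int main() {
    const unsigned XSIZE = 240, BLOCKX = 32;   // assumed sample sizes
    unsigned iXSIZE = XSIZE;
    while (iXSIZE % BLOCKX != 0) ++iXSIZE;     // rounding loop used by the harness
    std::printf("%u == %u\n", iXSIZE / BLOCKX, divUp(XSIZE, BLOCKX));  // prints "8 == 8"
    return 0;
}
```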
c0a09516e16cd02c44e78c7c9ddfc17fdb6e91c4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Dx_Forward_Kernel(float* output, const float* input, const int width, const int height, const int nChannels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= width || y >= height)
return;
int offset = y*width + x;
if (x == width - 1)
{
for (int c = 0; c < nChannels; c++)
output[offset*nChannels + c] = 0;
}
else
{
for (int c = 0; c < nChannels; c++)
output[offset*nChannels + c] = input[(offset + 1)*nChannels + c] - input[offset*nChannels + c];
}
}
|
c0a09516e16cd02c44e78c7c9ddfc17fdb6e91c4.cu
|
#include "includes.h"
__global__ void Dx_Forward_Kernel(float* output, const float* input, const int width, const int height, const int nChannels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= width || y >= height)
return;
int offset = y*width + x;
if (x == width - 1)
{
for (int c = 0; c < nChannels; c++)
output[offset*nChannels + c] = 0;
}
else
{
for (int c = 0; c < nChannels; c++)
output[offset*nChannels + c] = input[(offset + 1)*nChannels + c] - input[offset*nChannels + c];
}
}
|
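`Dx_Forward_Kernel` computes a per-channel forward difference along x and zeroes the last column. A minimal host-side driver sketch, assuming the kernel definition above is compiled into the same translation unit and using assumed image dimensions:

```cuda
#include <cuda_runtime.h>
#include <cstdio>

// Declaration matching the kernel shown in this row (definition assumed to be linked in).
__global__ void Dx_Forward_Kernel(float* output, const float* input,
                                  const int width, const int height, const int nChannels);

int main() {
    const int width = 64, height = 48, nChannels = 3;              // assumed sizes
    const size_t bytes = size_t(width) * height * nChannels * sizeof(float);
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, bytes);
    cudaMemset(d_in, 0, bytes);                                    // placeholder input
    dim3 block(16, 16);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    Dx_Forward_Kernel<<<grid, block>>>(d_out, d_in, width, height, nChannels);
    cudaDeviceSynchronize();
    std::printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
```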
68eb48f21762824aff6bd90e1566cff10f3adcd7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "types-gpu.h"
#include "mblas/handles.h"
namespace amunmt {
namespace GPU {
void HandleError(hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
std::cerr << "ERROR: " << hipGetErrorString(err) << " in " << file << " at line " << line << std::endl;
exit( EXIT_FAILURE );
}
}
}
}
|
68eb48f21762824aff6bd90e1566cff10f3adcd7.cu
|
#include <iostream>
#include "types-gpu.h"
#include "mblas/handles.h"
namespace amunmt {
namespace GPU {
void HandleError(cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
std::cerr << "ERROR: " << cudaGetErrorString(err) << " in " << file << " at line " << line << std::endl;
exit( EXIT_FAILURE );
}
}
}
}
|
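`HandleError` in this row is the usual checked-call helper. Projects typically wrap it in a macro that stamps in `__FILE__` and `__LINE__`; a self-contained sketch of that pattern (the `HANDLE_ERROR` macro name and the local copy of the helper are assumptions for illustration, not taken from the amunmt sources above):

```cuda
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Standalone copy of the helper for this sketch.
static void HandleError(cudaError_t err, const char* file, int line) {
    if (err != cudaSuccess) {
        std::fprintf(stderr, "ERROR: %s in %s at line %d\n", cudaGetErrorString(err), file, line);
        std::exit(EXIT_FAILURE);
    }
}
// Assumed convenience macro: records the call site automatically.
#define HANDLE_ERROR(call) HandleError((call), __FILE__, __LINE__)

int main() {
    float* d_buf = nullptr;
    HANDLE_ERROR(cudaMalloc(&d_buf, 1024 * sizeof(float)));  // checked allocation
    HANDLE_ERROR(cudaFree(d_buf));                           // checked release
    return 0;
}
```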
fb7586e12fefb063a65b20b2b9e9c135dd6acefc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <random>
#include <math.h>
#include <algorithm>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <malloc.h>
#include <time.h>
#define ROWLEN 1024*5
#define COLLEN 1024*5
#define MAX 10
//N = arraySize
//#define n 512
//#define multiple 3
//#define N 1024
//S = threadNum
#define S 32
#define RL ROWLEN / S
#define CL COLLEN / S
clock_t start, stop; // clock_t is the type returned by clock()
double duration;
using namespace std;
//Use the GPU to calculate the KNN's answer
__global__ void getDistanceGPU(double trainSet[COLLEN][ROWLEN], double* testData, double* dis)
{
int xid = threadIdx.x + blockDim.x * blockIdx.x;
int yid = threadIdx.y + blockDim.y * blockIdx.y;
int row = yid;
int col = xid;
if (col < ROWLEN && row < COLLEN)
{
double temp = 0;
for (int i = 0; i < ROWLEN; i++)
{
temp += pow((trainSet[row][i] - testData[i]), 2);
}
dis[row] = sqrt(temp);
}
}
void gpuCal(double a[ROWLEN][COLLEN], double b[ROWLEN], double c[COLLEN])
{
double (*dev_a)[ROWLEN];
double* dev_b;
double* dev_c;
// Allocate memory on the GPU
hipMalloc((void**)&dev_a, ROWLEN * COLLEN * sizeof(double));
hipMalloc((void**)&dev_b, ROWLEN * sizeof(double));
hipMalloc((void**)&dev_c, COLLEN * sizeof(double));
// Copy data from the CPU to the GPU
hipMemcpy(dev_a, a, ROWLEN * COLLEN * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, ROWLEN * sizeof(double), hipMemcpyHostToDevice);
// Declare timing events
float time = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Launch the GPU computation
dim3 threadsPerBlock(S, S);
dim3 blocksPerGrid(RL, CL);
getDistanceGPU<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c);
// Stop timing
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
// Compute the elapsed time
hipEventElapsedTime(&time, start, stop);
// Copy the results back to the CPU
hipMemcpy(c, dev_c, COLLEN * sizeof(double), hipMemcpyDeviceToHost);
/* for (int j = 0; j < COLLEN; j++)
{
printf("%f ", c[j]);
}
printf("\n");*/
printf("GPU: spendTime: %fms\n\n\n", time);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
}
//Calculate the distance between testData and dataSet[i]
double getDistance(double* d1, double* d2);
//calculate all the distance between testData and each training data
void getAllDistance(double trainSet[ROWLEN][COLLEN], double* testData, double* discard_block);
// Randomly generated training set
void randNum(double trainSet[ROWLEN][COLLEN], int rlen, int clen);
//Randomly generated testData
void randNum(double* testData, int clen);
//Print the trainSet
void print(double trainSet[ROWLEN][COLLEN], int rlen, int clen);
//Print the testSet
void print(double* testData, int clen);
int main(int argc, char const* argv[])
{
double (*trainSet)[ROWLEN];
double* testData;
double* dis;
trainSet = new double[ROWLEN][COLLEN];
testData = new double[COLLEN];
dis = new double[ROWLEN];
randNum(trainSet, ROWLEN, COLLEN);
randNum(testData, COLLEN);
gpuCal(trainSet, testData, dis);
getAllDistance(trainSet, testData, dis);
cout << "-----------------trainSet----------------------------" << endl;
//print(trainSet, ROWLEN, COLLEN);
cout << "-----------------testSet----------------------------" << endl;
//print(testData, COLLEN);
cout << "-----------------dis-------------------------------" << endl;
print(dis, COLLEN);
sort(dis, dis + COLLEN);
print(dis, COLLEN);
return 0;
}
//Calculate the distance between trainSet and testData
double getDistance(double* d1, double* d2)
{
double dis = 0;
for (int i = 0; i < COLLEN; i++)
{
dis += pow((d1[i] - d2[i]), 2);
}
return sqrt(dis);
}
//calculate all the distance between testData and each training data
void getAllDistance(double trainSet[ROWLEN][COLLEN], double* testData, double* dis)
{
start = clock();
//******************************
//* Place the code you want to time here *
for (int i = 0; i < ROWLEN; i++)
{
dis[i] = getDistance(trainSet[i], testData);
}
//******************************
stop = clock();
duration = (double)(stop - start) / CLK_TCK; // CLK_TCK is the time unit of clock(), i.e. clock ticks
printf("CPU: spendTime: %fms\n\n\n", duration * 1000);
}
// Randomly generated training set
void randNum(double trainSet[ROWLEN][COLLEN], int rlen, int clen)
{
for (int i = 0; i < rlen; i++)
{
for (int j = 0; j < clen; j++)
{
trainSet[i][j] = rand() % MAX;
}
}
}
//Randomly generated testData
void randNum(double* testData, int clen)
{
for (int i = 0; i < clen; i++)
{
testData[i] = rand() % MAX;
}
}
//Print the trainSet
void print(double trainSet[ROWLEN][COLLEN], int rlen, int clen)
{
for (int i = 0; i < rlen; i++)
{
for (int j = 0; j < clen; j++)
{
cout << trainSet[i][j] << " ";
}
cout << endl;
}
}
//Print the testSet
void print(double* testData, int clen)
{
for (int i = 0; i < clen; i++)
{
cout << testData[i] << " ";
}
cout << endl;
}
|
fb7586e12fefb063a65b20b2b9e9c135dd6acefc.cu
|
#include "cuda_runtime.h"
#include <iostream>
#include <random>
#include <math.h>
#include <algorithm>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <malloc.h>
#include <time.h>
#define ROWLEN 1024*5
#define COLLEN 1024*5
#define MAX 10
//N = arraySize
//#define n 512
//#define multiple 3
//#define N 1024
//S = threadNum
#define S 32
#define RL ROWLEN / S
#define CL COLLEN / S
clock_t start, stop; // clock_t is the type returned by clock()
double duration;
using namespace std;
//Use the GPU to calculate the KNN's answer
__global__ void getDistanceGPU(double trainSet[COLLEN][ROWLEN], double* testData, double* dis)
{
int xid = threadIdx.x + blockDim.x * blockIdx.x;
int yid = threadIdx.y + blockDim.y * blockIdx.y;
int row = yid;
int col = xid;
if (col < ROWLEN && row < COLLEN)
{
double temp = 0;
for (int i = 0; i < ROWLEN; i++)
{
temp += pow((trainSet[row][i] - testData[i]), 2);
}
dis[row] = sqrt(temp);
}
}
void gpuCal(double a[ROWLEN][COLLEN], double b[ROWLEN], double c[COLLEN])
{
double (*dev_a)[ROWLEN];
double* dev_b;
double* dev_c;
// Allocate memory on the GPU
cudaMalloc((void**)&dev_a, ROWLEN * COLLEN * sizeof(double));
cudaMalloc((void**)&dev_b, ROWLEN * sizeof(double));
cudaMalloc((void**)&dev_c, COLLEN * sizeof(double));
// Copy data from the CPU to the GPU
cudaMemcpy(dev_a, a, ROWLEN * COLLEN * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, ROWLEN * sizeof(double), cudaMemcpyHostToDevice);
// Declare timing events
float time = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Launch the GPU computation
dim3 threadsPerBlock(S, S);
dim3 blocksPerGrid(RL, CL);
getDistanceGPU<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c);
// Stop timing
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
// Compute the elapsed time
cudaEventElapsedTime(&time, start, stop);
// Copy the results back to the CPU
cudaMemcpy(c, dev_c, COLLEN * sizeof(double), cudaMemcpyDeviceToHost);
/* for (int j = 0; j < COLLEN; j++)
{
printf("%f ", c[j]);
}
printf("\n");*/
printf("GPU: spendTime: %fms\n\n\n", time);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
}
//Calculate the distance between testData and dataSet[i]
double getDistance(double* d1, double* d2);
//calculate all the distance between testData and each training data
void getAllDistance(double trainSet[ROWLEN][COLLEN], double* testData, double* discard_block);
// Randomly generated training set
void randNum(double trainSet[ROWLEN][COLLEN], int rlen, int clen);
//Randomly generated testData
void randNum(double* testData, int clen);
//Print the trainSet
void print(double trainSet[ROWLEN][COLLEN], int rlen, int clen);
//Print the testSet
void print(double* testData, int clen);
int main(int argc, char const* argv[])
{
double (*trainSet)[ROWLEN];
double* testData;
double* dis;
trainSet = new double[ROWLEN][COLLEN];
testData = new double[COLLEN];
dis = new double[ROWLEN];
randNum(trainSet, ROWLEN, COLLEN);
randNum(testData, COLLEN);
gpuCal(trainSet, testData, dis);
getAllDistance(trainSet, testData, dis);
cout << "-----------------trainSet----------------------------" << endl;
//print(trainSet, ROWLEN, COLLEN);
cout << "-----------------testSet----------------------------" << endl;
//print(testData, COLLEN);
cout << "-----------------dis-------------------------------" << endl;
print(dis, COLLEN);
sort(dis, dis + COLLEN);
print(dis, COLLEN);
return 0;
}
//Calculate the distance between trainSet and testData
double getDistance(double* d1, double* d2)
{
double dis = 0;
for (int i = 0; i < COLLEN; i++)
{
dis += pow((d1[i] - d2[i]), 2);
}
return sqrt(dis);
}
//calculate all the distance between testData and each training data
void getAllDistance(double trainSet[ROWLEN][COLLEN], double* testData, double* dis)
{
start = clock();
//******************************
//* Place the code you want to time here *
for (int i = 0; i < ROWLEN; i++)
{
dis[i] = getDistance(trainSet[i], testData);
}
//******************************
stop = clock();
duration = (double)(stop - start) / CLK_TCK; // CLK_TCK is the time unit of clock(), i.e. clock ticks
printf("CPU: spendTime: %fms\n\n\n", duration * 1000);
}
// Randomly generated training set
void randNum(double trainSet[ROWLEN][COLLEN], int rlen, int clen)
{
for (int i = 0; i < rlen; i++)
{
for (int j = 0; j < clen; j++)
{
trainSet[i][j] = rand() % MAX;
}
}
}
//Randomly generated testData
void randNum(double* testData, int clen)
{
for (int i = 0; i < clen; i++)
{
testData[i] = rand() % MAX;
}
}
//Print the trainSet
void print(double trainSet[ROWLEN][COLLEN], int rlen, int clen)
{
for (int i = 0; i < rlen; i++)
{
for (int j = 0; j < clen; j++)
{
cout << trainSet[i][j] << " ";
}
cout << endl;
}
}
//Print the testSet
void print(double* testData, int clen)
{
for (int i = 0; i < clen; i++)
{
cout << testData[i] << " ";
}
cout << endl;
}
|
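`gpuCal` above brackets its kernel launch with `cudaEvent_t` records to time it on the device. A minimal standalone sketch of that event-timing pattern (the kernel, sizes, and names below are placeholders, not taken from the row):

```cuda
#include <cuda_runtime.h>
#include <cstdio>

// Placeholder kernel so the timing pattern has something to measure.
__global__ void busyKernel(float* data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = sqrtf(static_cast<float>(i));
}

int main() {
    const int n = 1 << 20;
    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);                              // mark the start on the stream
    busyKernel<<<(n + 255) / 256, 256>>>(d_data, n);
    cudaEventRecord(stop);                               // mark the end on the stream
    cudaEventSynchronize(stop);                          // wait for the stop event

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);              // elapsed GPU time in ms
    std::printf("kernel time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_data);
    return 0;
}
```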
c376a85758d6b9b1609d85e3d79263f90db60bb0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "topk_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "device_atomic_functions.h"
#include "hipcub/hipcub.hpp"
#include "cub/util_type.cuh"
#include "hipcub/hipcub.hpp"
#include "hipcub/hipcub.hpp"
#include <limits>
namespace onnxruntime {
namespace cuda {
using namespace cub;
template <typename T>
struct KV {
T key;
int64_t val;
};
#define BT GridDim::maxThreadsPerBlock
#define ALIGN(N) static_cast<int64_t>(pow(2, ceil(log2(static_cast<double>(N)))))
#define FROM(idx) (left_dim + (idx)*mid_dim + right_dim)
#define TO(idx) (left_dim * K / dimension + (idx)*mid_dim + right_dim)
#define TRIVIAL (1 == largest ? type_min : type_max)
#define BIGGER(n, m) (n.key > m.key ? n : (n.key < m.key ? m : (n.val > m.val ? (1 == largest ? m : n) : (1 == largest ? n : m))))
#define SMALLER(n, m) (n.key < m.key ? n : (n.key > m.key ? m : (n.val < m.val ? (1 == largest ? m : n) : (1 == largest ? n : m))))
#define IS_SMALLER(n, m) (n.key < m.key || !(n.key > m.key) && (1 == largest ? n.val > m.val : n.val < m.val))
#define LESS(n, m) ((n) <= (m) ? (n) : (m))
template <typename T>
__global__ void BitonicTopK(const T* X, T* V, int64_t* I, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t aligned_K, int64_t largest, int64_t sorted, int64_t dimension, int64_t aligned_dimension, T type_min, T type_max) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
extern __shared__ char shared_mem[];
auto S = (KV<T>*)(shared_mem);
auto mid_dim = axis == size - 1 ? 1 : elem_nums[axis + 1];
auto left_dim = bid / mid_dim * elem_nums[axis];
auto right_dim = axis == size - 1 ? 0 : bid % elem_nums[axis + 1];
for (auto i = tid; i < aligned_dimension; i += blockDim.x) {
S[i].key = i < dimension ? X[FROM(i)] : TRIVIAL;
S[i].val = i;
}
__syncthreads();
//sort each K
for (int64_t len = 1; len < aligned_K; len <<= 1) {
auto dir = len << 1;
for (auto inc = len; inc > 0; inc >>= 1) {
auto low = tid & (inc - 1);
auto i = (tid << 1) - low;
auto j = i + inc;
if (j < aligned_dimension) {
auto reverse = (dir & i) == 0;
auto swap = reverse ^ IS_SMALLER(S[i], S[j]);
if (swap) {
auto tmp = S[i];
S[i] = S[j];
S[j] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
//merge and rebuild K
for (int64_t len = aligned_K; len < aligned_dimension; len <<= 1) {
auto dir = len << 1;
auto i = (tid << 1) - (tid & (len - 1));
auto j = i + len;
if (i % dir < aligned_K && j < aligned_dimension) {
S[i] = 1 == largest ? BIGGER(S[i], S[j]) : SMALLER(S[i], S[j]);
}
__syncthreads();
for (auto inc = aligned_K >> 1; inc > 0; inc >>= 1) {
auto ii = (tid << 1) - (tid & (inc - 1));
auto jj = ii + inc;
if (ii % dir < aligned_K && jj < aligned_dimension) {
auto reverse = (dir & ii) == 0;
auto swap = reverse ^ IS_SMALLER(S[ii], S[jj]);
if (swap) {
auto tmp = S[ii];
S[ii] = S[jj];
S[jj] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
//save top K
if (1 == sorted) {
if (1 == largest) {
auto start = aligned_K - K;
if (tid >= start && tid < aligned_K) {
auto to = TO(aligned_K - 1 - tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
} else {
if (tid < K) {
auto to = TO(tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
}
} else {
if (1 == largest) {
auto start = aligned_K - K;
if (tid < start) {
S[tid].val = aligned_dimension;
}
} else {
if (tid >= K && tid < aligned_K) {
S[tid].val = aligned_dimension;
}
}
__syncthreads();
//sort by index ascending
for (int64_t len = 1; len < aligned_K; len <<= 1) {
auto dir = len << 1;
for (int64_t inc = len; inc > 0; inc >>= 1) {
auto low = tid & (inc - 1);
auto i = (tid << 1) - low;
auto j = i + inc;
if (j < aligned_K) {
auto reverse = (dir & i) == 0;
auto swap = reverse ^ (S[i].val < S[j].val);
if (swap) {
auto tmp = S[i];
S[i] = S[j];
S[j] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
if (tid < K) {
auto to = TO(tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
}
}
template <typename T>
__device__ __inline__ bool Equal(const T& t0, const T& t1) {
auto t2 = t0 > t1 ? t0 - t1 : t1 - t0;
return (double)t2 < 1.0e-5;
}
template<typename T>
__device__ bool SamePrefix(const T* t0, const T* t1, int64_t skip) {
return (((*t0)^(*t1))>>skip) == 0;
}
__device__ bool SamePrefix(const float* f0, const float* f1, int64_t skip) {
return SamePrefix((const int32_t*)f0, (const int32_t*)f1, skip);
}
__device__ bool SamePrefix(const double* d0, const double* d1, int64_t skip) {
return SamePrefix((const int64_t*)d0, (const int64_t*)d1, skip);
}
template<typename T>
__device__ int32_t Radix(const T* t, int64_t skip) {
return ((*t)>>skip)&255;
}
__device__ int32_t Radix(const float* f, int64_t skip) {
return Radix((const int32_t*)f, skip);
}
__device__ int32_t Radix(const double* d, int64_t skip) {
return Radix((const int64_t*)d, skip);
}
template<typename T>
__device__ void SetByte(T* t, int64_t byte) {
(*t) |= byte;
}
__device__ void SetByte(float* f, int64_t byte) {
SetByte((int32_t*)f, byte);
}
__device__ void SetByte(double* d, int64_t byte) {
SetByte((int64_t*)d, byte);
}
template<typename T, int64_t THREADS, int64_t KPT>
__global__ void RadixTopK(const T* X, T* V, int64_t* I, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t dimension, int64_t XPT, T type_min, T type_max) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
extern __shared__ char shared_mem[];
auto H = (uint32_t*)shared_mem;
auto mid_dim = axis == size - 1 ? 1 : elem_nums[axis + 1];
auto left_dim = bid / mid_dim * elem_nums[axis];
auto right_dim = axis == size - 1 ? 0 : bid % elem_nums[axis + 1];
T Kth = (T)0, sign = (T)1;
typedef BlockScan<uint32_t, THREADS> BlockScan;
typedef BlockReduce<uint32_t, THREADS> BlockReduce;
typedef BlockRadixSort<T, THREADS, KPT, int64_t> BlockRadixSort;
__shared__ union {
typename BlockScan::TempStorage scan;
typename BlockReduce::TempStorage reduce;
typename BlockRadixSort::TempStorage sort;
} temp_storage;
uint32_t positive = 0, negative = 0;
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
T x = X[FROM(x_i)];
if (x > 0) {
++positive;
} else if (x < 0) {
++negative;
}
}
__syncthreads();
positive = BlockReduce(temp_storage.reduce).Sum(positive);
negative = BlockReduce(temp_storage.reduce).Sum(negative);
if (0 == tid) {
H[0] = positive;
H[1] = negative;
}
__syncthreads();
positive = H[0];
negative = H[1];
if ((1 == largest && (K <= positive || dimension - K + 1 <= negative)) ||
(0 == largest && (K <= negative || dimension - K + 1 <= positive))) {
auto KK = K;
if (1 == largest) {
if (KK > positive) {
KK = dimension - KK + 1;
sign = (T)-1;
}
} else {
if (KK > negative) {
KK = dimension - KK + 1;
} else {
sign = (T)-1;
}
}
__syncthreads();
#pragma unroll
for (int64_t byte = sizeof(T)-1; byte > -1; --byte) {
if (tid < 256) H[tid] = 0;
__syncthreads();
auto skip = 8 * byte, prev_skip = 8 * (byte + 1);
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
T x = sign*X[FROM(x_i)];
if (x > 0 && (byte == sizeof(T) - 1 || SamePrefix(&x, &Kth, prev_skip))) {
atomicAdd(&H[Radix(&x, skip)], 1);
}
}
__syncthreads();
for (int64_t radix = 255; radix > 0; --radix) {
if (H[radix] < KK) {
KK -= H[radix];
} else {
SetByte(&Kth, radix<<skip);
break;
}
}
__syncthreads();
}
Kth *= sign;
}
uint32_t superior = 0, equal = 0;
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
auto x = X[FROM(x_i)];
if (1 == largest && x > Kth || 0 == largest && x < Kth) {
++superior;
} else if (Equal(x, Kth)) {
++equal;
}
}
__syncthreads();
auto all_superior = superior;
all_superior = BlockReduce(temp_storage.reduce).Sum(all_superior);
if (0 == tid) {
H[0] = all_superior;
}
__syncthreads();
all_superior = H[0];
BlockScan(temp_storage.scan).ExclusiveSum(superior, superior);
BlockScan(temp_storage.scan).ExclusiveSum(equal, equal);
__syncthreads();
auto equal_quota = K - all_superior - equal;
auto output_i = superior + LESS(K - all_superior, equal);
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
auto x = X[FROM(x_i)];
if (1 == largest && x > Kth || 0 == largest && x < Kth) {
auto to_i = TO(output_i);
V[to_i] = x;
I[to_i] = x_i;
++output_i;
} else if (Equal(x, Kth) && equal_quota > 0) {
auto to_i = TO(output_i);
V[to_i] = x;
I[to_i] = x_i;
++output_i;
--equal_quota;
}
}
__syncthreads();
if (1 == sorted) {
T keys[KPT];
int64_t vals[KPT];
for (int64_t k_i = tid, k_c = 0; k_c < KPT; k_i += blockDim.x, ++k_c) {
if (k_i < K) {
auto to_i = TO(k_i);
keys[k_c] = V[to_i];
vals[k_c] = I[to_i];
} else {
if (1 == largest) {
keys[k_c] = type_min;
} else {
keys[k_c] = type_max;
}
}
}
__syncthreads();
if (1 == largest) {
BlockRadixSort(temp_storage.sort).SortDescending(keys, vals);
} else {
BlockRadixSort(temp_storage.sort).Sort(keys, vals);
}
__syncthreads();
#pragma unroll
for (int64_t k_c = 0; k_c < KPT; ++k_c) {
auto k_i = tid * KPT + k_c;
if (k_i < K) {
auto to_i = TO(k_i);
V[to_i] = keys[k_c];
I[to_i] = vals[k_c];
}
}
}
}
template <typename T>
__global__ void FillInput(const T* input_x, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis];
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto input_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[id] = input_x[input_offset];
output_i[id] = id;
}
template <typename T>
__global__ void FillOutput(const T* input_v, const int64_t* input_i, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, K);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis] * K / dimension;
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto output_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[output_offset] = input_v[id];
output_i[output_offset] = input_i[id];
}
__global__ void ExcludeOutput(int64_t* output_i, int64_t K, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
if (id >= K) {
output_i[id] = dimension;
}
}
template <typename T>
Status TopKImpl(const CudaKernel* kernel, const T* input_x, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension) {
auto aligned_K = ALIGN(K);
auto aligned_dimension = ALIGN(dimension);
if (aligned_dimension <= GridDim::maxThreadsPerBlock) {
hipLaunchKernelGGL(( BitonicTopK<T>), dim3(N), dim3(GridDim::maxThreadsPerBlock), aligned_dimension * sizeof(KV<T>), 0, input_x, output_v, output_i, elem_nums, size, axis, K, aligned_K, largest, sorted, dimension, aligned_dimension, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
} else if (K <= BT*16 || 0 == sorted) {
auto XPT = static_cast<int64_t>(ceil(static_cast<double>(dimension) / GridDim::maxThreadsPerBlock));
if (BT*2 >= K || 0 == sorted) {
hipLaunchKernelGGL(( RadixTopK<T,BT,2>), dim3(N),dim3(BT),256*sizeof(uint32_t), 0, input_x, output_v, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
} else if (BT*4>=K) {
hipLaunchKernelGGL(( RadixTopK<T,BT,4>), dim3(N),dim3(BT),256*sizeof(uint32_t), 0, input_x, output_v, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
} else if (BT*8>=K) {
hipLaunchKernelGGL(( RadixTopK<T,BT,8>), dim3(N),dim3(BT),256*sizeof(uint32_t), 0, input_x, output_v, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
} else {
hipLaunchKernelGGL(( RadixTopK<T,BT,16>), dim3(N),dim3(BT),256*sizeof(uint32_t), 0, input_x, output_v, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
}
} else {
auto input_key_buffer = kernel->GetScratchBuffer<T>(dimension);
auto output_key_buffer = kernel->GetScratchBuffer<T>(dimension);
auto input_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto output_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto* input_key = input_key_buffer.get();
auto* output_key = output_key_buffer.get();
auto* input_value = input_value_buffer.get();
auto* output_value = output_value_buffer.get();
size_t temp_bytes = 0;
CUDA_RETURN_IF_ERROR(hipcub::DeviceRadixSort::SortPairs(nullptr, temp_bytes, input_key, output_key, input_value, output_value, dimension));
auto temp_storage_buffer = kernel->GetScratchBuffer<char>(temp_bytes);
auto* temp_storage = temp_storage_buffer.get();
auto blocks_per_grid_D = (int)(ceil(static_cast<float>(dimension) / BT));
auto blocks_per_grid_K = (int)(ceil(static_cast<float>(K) / BT));
for (int64_t i = 0; i < N; i++) {
hipLaunchKernelGGL(( FillInput<T>), dim3(blocks_per_grid_D), dim3(BT), 0, 0, input_x, input_key, input_value, elem_nums, size, axis, K, i, dimension);
CUDA_RETURN_IF_ERROR(1 == largest ? hipcub::DeviceRadixSort::SortPairsDescending(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension) : hipcub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension));
if (1 == sorted) {
hipLaunchKernelGGL(( FillOutput<T>), dim3(blocks_per_grid_K), dim3(BT), 0, 0, output_key, output_value, output_v, output_i, elem_nums, size, axis, K, i, dimension);
} else { //reorder by ascending index
hipLaunchKernelGGL(( ExcludeOutput), dim3(blocks_per_grid_D), dim3(BT), 0, 0, output_value, K, dimension);
CUDA_RETURN_IF_ERROR(hipcub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, output_value, input_value, output_key, input_key, dimension));
hipLaunchKernelGGL(( FillOutput<T>), dim3(blocks_per_grid_K), dim3(BT), 0, 0, input_key, input_value, output_v, output_i, elem_nums, size, axis, K, i, dimension);
}
}
}
return Status::OK();
}
#define TOPKIMPLE(T) template Status TopKImpl<T>(const CudaKernel* kernel, \
const T* input_x, \
T* output_v, \
int64_t* output_i, \
const int64_t* elem_nums, \
size_t size, \
int64_t axis, \
int64_t K, \
int64_t largest, \
int64_t sorted, \
int64_t N, \
int64_t dimension)
TOPKIMPLE(uint8_t);
TOPKIMPLE(uint16_t);
TOPKIMPLE(uint32_t);
TOPKIMPLE(uint64_t);
TOPKIMPLE(int8_t);
TOPKIMPLE(int16_t);
TOPKIMPLE(int32_t);
TOPKIMPLE(int64_t);
TOPKIMPLE(float);
TOPKIMPLE(double);
} // namespace cuda
} // namespace onnxruntime
|
c376a85758d6b9b1609d85e3d79263f90db60bb0.cu
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "topk_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "device_atomic_functions.h"
#include "cub/cub.cuh"
#include "cub/util_type.cuh"
#include "cub/util_allocator.cuh"
#include "cub/device/device_radix_sort.cuh"
#include <limits>
namespace onnxruntime {
namespace cuda {
using namespace cub;
template <typename T>
struct KV {
T key;
int64_t val;
};
#define BT GridDim::maxThreadsPerBlock
#define ALIGN(N) static_cast<int64_t>(pow(2, ceil(log2(static_cast<double>(N)))))
#define FROM(idx) (left_dim + (idx)*mid_dim + right_dim)
#define TO(idx) (left_dim * K / dimension + (idx)*mid_dim + right_dim)
#define TRIVIAL (1 == largest ? type_min : type_max)
#define BIGGER(n, m) (n.key > m.key ? n : (n.key < m.key ? m : (n.val > m.val ? (1 == largest ? m : n) : (1 == largest ? n : m))))
#define SMALLER(n, m) (n.key < m.key ? n : (n.key > m.key ? m : (n.val < m.val ? (1 == largest ? m : n) : (1 == largest ? n : m))))
#define IS_SMALLER(n, m) (n.key < m.key || !(n.key > m.key) && (1 == largest ? n.val > m.val : n.val < m.val))
#define LESS(n, m) ((n) <= (m) ? (n) : (m))
template <typename T>
__global__ void BitonicTopK(const T* X, T* V, int64_t* I, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t aligned_K, int64_t largest, int64_t sorted, int64_t dimension, int64_t aligned_dimension, T type_min, T type_max) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
extern __shared__ char shared_mem[];
auto S = (KV<T>*)(shared_mem);
auto mid_dim = axis == size - 1 ? 1 : elem_nums[axis + 1];
auto left_dim = bid / mid_dim * elem_nums[axis];
auto right_dim = axis == size - 1 ? 0 : bid % elem_nums[axis + 1];
for (auto i = tid; i < aligned_dimension; i += blockDim.x) {
S[i].key = i < dimension ? X[FROM(i)] : TRIVIAL;
S[i].val = i;
}
__syncthreads();
//sort each K
for (int64_t len = 1; len < aligned_K; len <<= 1) {
auto dir = len << 1;
for (auto inc = len; inc > 0; inc >>= 1) {
auto low = tid & (inc - 1);
auto i = (tid << 1) - low;
auto j = i + inc;
if (j < aligned_dimension) {
auto reverse = (dir & i) == 0;
auto swap = reverse ^ IS_SMALLER(S[i], S[j]);
if (swap) {
auto tmp = S[i];
S[i] = S[j];
S[j] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
//merge and rebuild K
for (int64_t len = aligned_K; len < aligned_dimension; len <<= 1) {
auto dir = len << 1;
auto i = (tid << 1) - (tid & (len - 1));
auto j = i + len;
if (i % dir < aligned_K && j < aligned_dimension) {
S[i] = 1 == largest ? BIGGER(S[i], S[j]) : SMALLER(S[i], S[j]);
}
__syncthreads();
for (auto inc = aligned_K >> 1; inc > 0; inc >>= 1) {
auto ii = (tid << 1) - (tid & (inc - 1));
auto jj = ii + inc;
if (ii % dir < aligned_K && jj < aligned_dimension) {
auto reverse = (dir & ii) == 0;
auto swap = reverse ^ IS_SMALLER(S[ii], S[jj]);
if (swap) {
auto tmp = S[ii];
S[ii] = S[jj];
S[jj] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
//save top K
if (1 == sorted) {
if (1 == largest) {
auto start = aligned_K - K;
if (tid >= start && tid < aligned_K) {
auto to = TO(aligned_K - 1 - tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
} else {
if (tid < K) {
auto to = TO(tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
}
} else {
if (1 == largest) {
auto start = aligned_K - K;
if (tid < start) {
S[tid].val = aligned_dimension;
}
} else {
if (tid >= K && tid < aligned_K) {
S[tid].val = aligned_dimension;
}
}
__syncthreads();
//sort by index ascending
for (int64_t len = 1; len < aligned_K; len <<= 1) {
auto dir = len << 1;
for (int64_t inc = len; inc > 0; inc >>= 1) {
auto low = tid & (inc - 1);
auto i = (tid << 1) - low;
auto j = i + inc;
if (j < aligned_K) {
auto reverse = (dir & i) == 0;
auto swap = reverse ^ (S[i].val < S[j].val);
if (swap) {
auto tmp = S[i];
S[i] = S[j];
S[j] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
if (tid < K) {
auto to = TO(tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
}
}
template <typename T>
__device__ __inline__ bool Equal(const T& t0, const T& t1) {
auto t2 = t0 > t1 ? t0 - t1 : t1 - t0;
return (double)t2 < 1.0e-5;
}
template<typename T>
__device__ bool SamePrefix(const T* t0, const T* t1, int64_t skip) {
return (((*t0)^(*t1))>>skip) == 0;
}
__device__ bool SamePrefix(const float* f0, const float* f1, int64_t skip) {
return SamePrefix((const int32_t*)f0, (const int32_t*)f1, skip);
}
__device__ bool SamePrefix(const double* d0, const double* d1, int64_t skip) {
return SamePrefix((const int64_t*)d0, (const int64_t*)d1, skip);
}
template<typename T>
__device__ int32_t Radix(const T* t, int64_t skip) {
return ((*t)>>skip)&255;
}
__device__ int32_t Radix(const float* f, int64_t skip) {
return Radix((const int32_t*)f, skip);
}
__device__ int32_t Radix(const double* d, int64_t skip) {
return Radix((const int64_t*)d, skip);
}
template<typename T>
__device__ void SetByte(T* t, int64_t byte) {
(*t) |= byte;
}
__device__ void SetByte(float* f, int64_t byte) {
SetByte((int32_t*)f, byte);
}
__device__ void SetByte(double* d, int64_t byte) {
SetByte((int64_t*)d, byte);
}
template<typename T, int64_t THREADS, int64_t KPT>
__global__ void RadixTopK(const T* X, T* V, int64_t* I, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t dimension, int64_t XPT, T type_min, T type_max) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
extern __shared__ char shared_mem[];
auto H = (uint32_t*)shared_mem;
auto mid_dim = axis == size - 1 ? 1 : elem_nums[axis + 1];
auto left_dim = bid / mid_dim * elem_nums[axis];
auto right_dim = axis == size - 1 ? 0 : bid % elem_nums[axis + 1];
T Kth = (T)0, sign = (T)1;
typedef BlockScan<uint32_t, THREADS> BlockScan;
typedef BlockReduce<uint32_t, THREADS> BlockReduce;
typedef BlockRadixSort<T, THREADS, KPT, int64_t> BlockRadixSort;
__shared__ union {
typename BlockScan::TempStorage scan;
typename BlockReduce::TempStorage reduce;
typename BlockRadixSort::TempStorage sort;
} temp_storage;
uint32_t positive = 0, negative = 0;
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
T x = X[FROM(x_i)];
if (x > 0) {
++positive;
} else if (x < 0) {
++negative;
}
}
__syncthreads();
positive = BlockReduce(temp_storage.reduce).Sum(positive);
negative = BlockReduce(temp_storage.reduce).Sum(negative);
if (0 == tid) {
H[0] = positive;
H[1] = negative;
}
__syncthreads();
positive = H[0];
negative = H[1];
if ((1 == largest && (K <= positive || dimension - K + 1 <= negative)) ||
(0 == largest && (K <= negative || dimension - K + 1 <= positive))) {
auto KK = K;
if (1 == largest) {
if (KK > positive) {
KK = dimension - KK + 1;
sign = (T)-1;
}
} else {
if (KK > negative) {
KK = dimension - KK + 1;
} else {
sign = (T)-1;
}
}
__syncthreads();
#pragma unroll
for (int64_t byte = sizeof(T)-1; byte > -1; --byte) {
if (tid < 256) H[tid] = 0;
__syncthreads();
auto skip = 8 * byte, prev_skip = 8 * (byte + 1);
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
T x = sign*X[FROM(x_i)];
if (x > 0 && (byte == sizeof(T) - 1 || SamePrefix(&x, &Kth, prev_skip))) {
atomicAdd(&H[Radix(&x, skip)], 1);
}
}
__syncthreads();
for (int64_t radix = 255; radix > 0; --radix) {
if (H[radix] < KK) {
KK -= H[radix];
} else {
SetByte(&Kth, radix<<skip);
break;
}
}
__syncthreads();
}
Kth *= sign;
}
uint32_t superior = 0, equal = 0;
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
auto x = X[FROM(x_i)];
if (1 == largest && x > Kth || 0 == largest && x < Kth) {
++superior;
} else if (Equal(x, Kth)) {
++equal;
}
}
__syncthreads();
auto all_superior = superior;
all_superior = BlockReduce(temp_storage.reduce).Sum(all_superior);
if (0 == tid) {
H[0] = all_superior;
}
__syncthreads();
all_superior = H[0];
BlockScan(temp_storage.scan).ExclusiveSum(superior, superior);
BlockScan(temp_storage.scan).ExclusiveSum(equal, equal);
__syncthreads();
auto equal_quota = K - all_superior - equal;
auto output_i = superior + LESS(K - all_superior, equal);
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
auto x = X[FROM(x_i)];
if (1 == largest && x > Kth || 0 == largest && x < Kth) {
auto to_i = TO(output_i);
V[to_i] = x;
I[to_i] = x_i;
++output_i;
} else if (Equal(x, Kth) && equal_quota > 0) {
auto to_i = TO(output_i);
V[to_i] = x;
I[to_i] = x_i;
++output_i;
--equal_quota;
}
}
__syncthreads();
if (1 == sorted) {
T keys[KPT];
int64_t vals[KPT];
for (int64_t k_i = tid, k_c = 0; k_c < KPT; k_i += blockDim.x, ++k_c) {
if (k_i < K) {
auto to_i = TO(k_i);
keys[k_c] = V[to_i];
vals[k_c] = I[to_i];
} else {
if (1 == largest) {
keys[k_c] = type_min;
} else {
keys[k_c] = type_max;
}
}
}
__syncthreads();
if (1 == largest) {
BlockRadixSort(temp_storage.sort).SortDescending(keys, vals);
} else {
BlockRadixSort(temp_storage.sort).Sort(keys, vals);
}
__syncthreads();
#pragma unroll
for (int64_t k_c = 0; k_c < KPT; ++k_c) {
auto k_i = tid * KPT + k_c;
if (k_i < K) {
auto to_i = TO(k_i);
V[to_i] = keys[k_c];
I[to_i] = vals[k_c];
}
}
}
}
template <typename T>
__global__ void FillInput(const T* input_x, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis];
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto input_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[id] = input_x[input_offset];
output_i[id] = id;
}
template <typename T>
__global__ void FillOutput(const T* input_v, const int64_t* input_i, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, K);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis] * K / dimension;
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto output_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[output_offset] = input_v[id];
output_i[output_offset] = input_i[id];
}
__global__ void ExcludeOutput(int64_t* output_i, int64_t K, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
if (id >= K) {
output_i[id] = dimension;
}
}
template <typename T>
Status TopKImpl(const CudaKernel* kernel, const T* input_x, T* output_v, int64_t* output_i, const int64_t* elem_nums, size_t size, int64_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension) {
auto aligned_K = ALIGN(K);
auto aligned_dimension = ALIGN(dimension);
if (aligned_dimension <= GridDim::maxThreadsPerBlock) {
BitonicTopK<T><<<N, GridDim::maxThreadsPerBlock, aligned_dimension * sizeof(KV<T>)>>>(input_x, output_v, output_i, elem_nums, size, axis, K, aligned_K, largest, sorted, dimension, aligned_dimension, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
} else if (K <= BT*16 || 0 == sorted) {
auto XPT = static_cast<int64_t>(ceil(static_cast<double>(dimension) / GridDim::maxThreadsPerBlock));
if (BT*2 >= K || 0 == sorted) {
RadixTopK<T,BT,2><<<N,BT,256*sizeof(uint32_t)>>>(input_x, output_v, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
} else if (BT*4>=K) {
RadixTopK<T,BT,4><<<N,BT,256*sizeof(uint32_t)>>>(input_x, output_v, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
} else if (BT*8>=K) {
RadixTopK<T,BT,8><<<N,BT,256*sizeof(uint32_t)>>>(input_x, output_v, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
} else {
RadixTopK<T,BT,16><<<N,BT,256*sizeof(uint32_t)>>>(input_x, output_v, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
}
} else {
auto input_key_buffer = kernel->GetScratchBuffer<T>(dimension);
auto output_key_buffer = kernel->GetScratchBuffer<T>(dimension);
auto input_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto output_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto* input_key = input_key_buffer.get();
auto* output_key = output_key_buffer.get();
auto* input_value = input_value_buffer.get();
auto* output_value = output_value_buffer.get();
size_t temp_bytes = 0;
CUDA_RETURN_IF_ERROR(cub::DeviceRadixSort::SortPairs(nullptr, temp_bytes, input_key, output_key, input_value, output_value, dimension));
auto temp_storage_buffer = kernel->GetScratchBuffer<char>(temp_bytes);
auto* temp_storage = temp_storage_buffer.get();
auto blocks_per_grid_D = (int)(ceil(static_cast<float>(dimension) / BT));
auto blocks_per_grid_K = (int)(ceil(static_cast<float>(K) / BT));
for (int64_t i = 0; i < N; i++) {
FillInput<T><<<blocks_per_grid_D, BT, 0>>>(input_x, input_key, input_value, elem_nums, size, axis, K, i, dimension);
CUDA_RETURN_IF_ERROR(1 == largest ? cub::DeviceRadixSort::SortPairsDescending(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension) : cub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension));
if (1 == sorted) {
FillOutput<T><<<blocks_per_grid_K, BT, 0>>>(output_key, output_value, output_v, output_i, elem_nums, size, axis, K, i, dimension);
} else { //reorder by ascending index
ExcludeOutput<<<blocks_per_grid_D, BT, 0>>>(output_value, K, dimension);
CUDA_RETURN_IF_ERROR(cub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, output_value, input_value, output_key, input_key, dimension));
FillOutput<T><<<blocks_per_grid_K, BT, 0>>>(input_key, input_value, output_v, output_i, elem_nums, size, axis, K, i, dimension);
}
}
}
return Status::OK();
}
#define TOPKIMPLE(T) template Status TopKImpl<T>(const CudaKernel* kernel, \
const T* input_x, \
T* output_v, \
int64_t* output_i, \
const int64_t* elem_nums, \
size_t size, \
int64_t axis, \
int64_t K, \
int64_t largest, \
int64_t sorted, \
int64_t N, \
int64_t dimension)
TOPKIMPLE(uint8_t);
TOPKIMPLE(uint16_t);
TOPKIMPLE(uint32_t);
TOPKIMPLE(uint64_t);
TOPKIMPLE(int8_t);
TOPKIMPLE(int16_t);
TOPKIMPLE(int32_t);
TOPKIMPLE(int64_t);
TOPKIMPLE(float);
TOPKIMPLE(double);
} // namespace cuda
} // namespace onnxruntime
|
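`BitonicTopK` in this row pads both `K` and the reduced dimension up to powers of two with the `ALIGN` macro before running its bitonic passes. A small host-side check of that macro with illustrative inputs:

```cuda
#include <cstdio>
#include <cstdint>
#include <math.h>

// Same macro as in the row above: round N up to the next power of two.
#define ALIGN(N) static_cast<int64_t>(pow(2, ceil(log2(static_cast<double>(N)))))

int main() {
    const int64_t samples[] = {1, 3, 5, 8, 100, 1000};
    for (int64_t n : samples)
        std::printf("ALIGN(%lld) = %lld\n", (long long)n, (long long)ALIGN(n));
    // Prints 1, 4, 8, 8, 128, 1024.
    return 0;
}
```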
bb85fee171b7f1a3d16d1e7776b0687ad4a64a49.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// 2016 Juncheng E at PIMS.
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <iostream>
#include <iomanip>
#include <ctime>
#include <stdlib.h>
#include <string.h>
using namespace std;
int count = 0;
int nmax; // maximum atom number
int natom; // actual atom number
int numgrain; // the number of grains
int nx,ny,nz; // number of primitive cells in each direction
float a; // lattice constant
float a2; // lattice constant^2
float lx,ly,lz; // the size of the simulation cell
float *alpha, *beta, *gama;
float3 *gr_centerp; // the centers of each of the grains
float3 *r; // atom positon
float temp,mass; // temperature, mass
int *atom_grain, *atom_neigh;
bool *atom_id;
//float *atom_mini;
//float *drlist;
int3 DIM;
__constant__ float d_lx, d_ly, d_lz, d_a;
__constant__ int d_nmax, d_numgrain, d_nx[2], d_ny[2], d_nz[2], d_natom, d_GBatoms;
int read_config(char* ifn)
{
int i;
ifstream ifile;
string linebuffer;
stringstream ss;
ifile.open(ifn);
cout << "Read " << ifn << "..." << endl;
getline(ifile, linebuffer);
ss << linebuffer;
ss >> a;
a2 = a*a;
ss.str(""); // Clean up ss
ss.clear(); // Clean up ss
getline(ifile, linebuffer);
ss << linebuffer;
ss >> numgrain >> lx >> ly >> lz;
ss.str(""); // Clean up ss
ss.clear(); // Clean up ss
printf("Number of grains: %d\n",numgrain);
gr_centerp = new float3 [numgrain];
alpha = new float [numgrain];
beta = new float [numgrain];
gama = new float [numgrain];
getline(ifile, linebuffer);
ss << linebuffer;
ss >> mass >> temp;
ss.str(""); // Clean up ss
ss.clear(); // Clean up ss
for ( i = 0; i < numgrain; ++i )
{
getline(ifile, linebuffer);
ss << linebuffer;
ss >> gr_centerp[i].x >> gr_centerp[i].y >> gr_centerp[i].z >> alpha[i] >> beta[i] >> gama[i];
// alpha[i] = alpha[i]*M_PI/180.0;
// beta[i] = beta[i]*M_PI/180.0;
// gama[i] = gama[i]*M_PI/180.0;
ss.str(""); // Clean up ss
ss.clear(); // Clean up ss
}
ifile.close();
return 0;
}
__device__ float pos_PBC(float pos, float l)
// This function calculates and returns the positions of the
// atoms with periodic boundary conditions used.
{
float pos_PBC;
if (pos < (0.0))
pos_PBC = pos + l;
else if (pos > (l))
pos_PBC = pos - l;
else
pos_PBC = pos;
return pos_PBC;
}
__device__ float separation_PBC(float ds, float l)
{
float s_PBC;
if (ds > (0.5*l))
s_PBC = ds - l;
else if (ds < (-0.5*l))
s_PBC = ds + l;
else
s_PBC = ds;
return s_PBC;
}
__device__ int getGlobalIdx_3D_3D_l(int l)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z * 4) + (threadIdx.z * (blockDim.x * blockDim.y * 4)) + (threadIdx.y * blockDim.x * 4 )+ threadIdx.x * 4 + l;
return threadId;
}
__device__ int getGlobalIdx_3D_1D() {
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_3D()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z ) + (threadIdx.z * (blockDim.x * blockDim.y )) + (threadIdx.y * blockDim.x )+ threadIdx.x ;
return threadId;
}
__device__ int getGlobalIdx_1D_1D()
{
return blockIdx.x *blockDim.x + threadIdx.x;
}
__device__ int check_position (float3 *d_gr_centerp, float x, float y, float z, int * grain)
// This function checks to see if the atom's position is
// closer to the center of the current grain than it is to
// any other grain. If so, check is assigned 1. If not, check
// is assigned 0.
{
int i, check;
float r12,r22,dx,dy,dz;
check = 1;
//check if atom is outside the outer periodic image cells
if (x >= 2.0*d_lx || x <= -d_lx)
check = 0;
else if (y >= 2.0*d_ly || y <= -d_ly)
check = 0;
else if (z >= 2.0*d_lz || z <= -d_lz)
check = 0;
if (check == 0) return 0;
dx = d_gr_centerp[*grain].x - x;
dy = d_gr_centerp[*grain].y - y;
dz = d_gr_centerp[*grain].z - z;
//check if atom is nearest to the actual grain center (and not its image)
if (abs(dx) > 0.5*d_lx)
check = 0;
else if (abs(dy) > 0.5*d_ly)
check = 0;
else if (abs(dz) > 0.5*d_lz)
check = 0;
if (check == 0) return 0;
//check if atom is closest to current grain center
r12 = dx*dx+dy*dy+dz*dz;
for ( i = 0; i < d_numgrain; ++i)
{
if (i == *grain) continue;
dx = d_gr_centerp[i].x - x;
dy = d_gr_centerp[i].y - y;
dz = d_gr_centerp[i].z - z;
dx = separation_PBC(dx,d_lx);
dy = separation_PBC(dy,d_ly);
dz = separation_PBC(dz,d_lz);
r22 = dx*dx+dy*dy+dz*dz;
if (r22 <= r12)
{ check = 0;
break;
}
}
return check;
}
// Heavy calculation
__global__ void assign_initial_positions(float3 *d_gr_centerp, float3 *d_r, float *d_alpha, float *d_beta, float *d_gama, bool *d_atom_id,int *d_grain, int *d_l1)
{
int check,l1;
int i = d_nx[0] + threadIdx.x + blockIdx.x * blockDim.x;
int j = d_ny[0] + threadIdx.y + blockIdx.y * blockDim.y;
int k = d_nz[0] + threadIdx.z + blockIdx.z * blockDim.z;
int n1;
float x1,y1,z1,x_rot,y_rot,z_rot;
float h11,h12,h13;
float h21,h22,h23;
float h31,h32,h33;
float basis[4][3];
float phi1, phi2, phi3;
// Distribute threads here
if ( i < d_nx[1] && j < d_ny[1] && k < d_nz[1] )
{
basis[0][0]=0.00;
basis[0][1]=0.00;
basis[0][2]=0.00;
basis[1][0]=0.50;
basis[1][1]=0.50;
basis[1][2]=0.00;
basis[2][0]=0.00;
basis[2][1]=0.50;
basis[2][2]=0.50;
basis[3][0]=0.50;
basis[3][1]=0.00;
basis[3][2]=0.50;
// Tilt the grains.
phi1 = d_alpha[*d_grain]*M_PI/180.0;
phi2 = d_beta[*d_grain]*M_PI/180.0;
phi3 = d_gama[*d_grain]*M_PI/180.0;
h11=cos(phi1)*cos(phi3)-sin(phi1)*sin(phi3)*cos(phi2);
h12=sin(phi1)*cos(phi3)+cos(phi1)*sin(phi3)*cos(phi2);
h13=sin(phi3)*sin(phi2);
h21=-(cos(phi1)*sin(phi3)+sin(phi1)*cos(phi3)*cos(phi2));
h22=-sin(phi1)*sin(phi3)+cos(phi1)*cos(phi3)*cos(phi2);
h23=cos(phi3)*sin(phi2);
h31=sin(phi1)*sin(phi2);
h32=-cos(phi1)*sin(phi2);
h33=cos(phi2);
l1=*d_l1;
n1 = getGlobalIdx_3D_3D();
x1 = i*d_a + basis[l1][0]*d_a;
y1 = j*d_a + basis[l1][1]*d_a;
z1 = k*d_a + basis[l1][2]*d_a;
x_rot = (x1*h11 + y1*h21 + z1*h31)+ d_gr_centerp[*d_grain].x;
y_rot = (x1*h12 + y1*h22 + z1*h32)+ d_gr_centerp[*d_grain].y;
z_rot = (x1*h13 + y1*h23 + z1*h33)+ d_gr_centerp[*d_grain].z;
check = check_position(d_gr_centerp, x_rot,y_rot,z_rot,d_grain);
if (check == 1)
{
d_r[n1].x = pos_PBC(x_rot,d_lx);
d_r[n1].y = pos_PBC(y_rot,d_ly);
d_r[n1].z = pos_PBC(z_rot,d_lz);
d_atom_id[n1] = 1;
}
}
//__syncthreads();
}
__global__ void get_GBlist(float3 *d_gr_centerp, float3 *d_r, int *d_atom_grain,int *d_atom_neigh, bool *d_tag)
{
int i, j, mygrain;
float dx1, dx2, dx, dy1, dy2, dy, dz1, dz2, dz, r12, r22, r32, r1, r3;
float co, projec, dis;
float d_mini;
i = getGlobalIdx_3D_3D();
if ( i < d_natom)
{
mygrain = d_atom_grain[i];
dx1 = separation_PBC(d_r[i].x - d_gr_centerp[mygrain].x,d_lx);
dy1 = separation_PBC(d_r[i].y - d_gr_centerp[mygrain].y,d_ly);
dz1 = separation_PBC(d_r[i].z - d_gr_centerp[mygrain].z,d_lz);
r12 = dx1*dx1+dy1*dy1+dz1*dz1;
r1 = sqrt(r12);
d_mini = d_a;
for ( j = 0; j < d_numgrain; ++j)
{
if ( j == mygrain ) continue;
dx = separation_PBC(d_r[i].x - d_gr_centerp[j].x,d_lx);
dy = separation_PBC(d_r[i].y - d_gr_centerp[j].y,d_ly);
dz = separation_PBC(d_r[i].z - d_gr_centerp[j].z,d_lz);
r22 = dx*dx+dy*dy+dz*dz;
dx2 = separation_PBC(d_gr_centerp[mygrain].x - d_gr_centerp[j].x,d_lx);
dy2 = separation_PBC(d_gr_centerp[mygrain].y - d_gr_centerp[j].y,d_ly);
dz2 = separation_PBC(d_gr_centerp[mygrain].z - d_gr_centerp[j].z,d_lz);
r32 = dx2*dx2+dy2*dy2+dz2*dz2;
r3 = sqrt(r32);
// Law of cosines: distance from the atom's projection to the midplane between the two grain centers
co = (r12+r32-r22)/2.0/r1/r3;
projec = r1*co;
dis = r3/2.0 - projec;
if (i == 0 && j == 0)
printf("d_natom = %d, dis = %f\n",d_natom,dis);
// if (dis < 0.22*d_a)
if (dis < 0.27*d_a)
{
d_tag[i] = 1;
if (dis < d_mini)
{
d_mini = dis;
d_atom_neigh[i] = j;
}
}
}
}
}
__global__ void clean_grain_boundaries(float3 *d_r, int *d_atom_grain,int *d_atom_neigh, int *d_GBlist, bool *d_tag)
{
int i, j, ii, jj;
float dx, dy, dz, dr2;
float a2 = d_a*d_a;
ii = getGlobalIdx_3D_1D();
// d_tag[d_GBlist[getGlobalIdx_1D_1D()]] = getGlobalIdx_1D_1D();
// if ( ii < d_GBatoms/10 && ii > d_GBatoms/100)
if (ii < d_GBatoms)
{
i = d_GBlist[ii];
for ( jj = ii+1; jj < d_GBatoms; ++jj)
{
j = d_GBlist[jj];
if (d_atom_neigh[i] != d_atom_grain[j] || d_atom_neigh[j] != d_atom_grain[i])
continue;
dx = d_r[i].x - d_r[j].x;
dy = d_r[i].y - d_r[j].y;
dz = d_r[i].z - d_r[j].z;
dx = separation_PBC(dx,d_lx);
dy = separation_PBC(dy,d_ly);
dz = separation_PBC(dz,d_lz);
dr2 = dx*dx+dy*dy+dz*dz;
// if (dr2 <= 0.17*a2)
if (dr2 <= 0.215*a2)
/*if (dr2 <= 0.4761*a2)*/
{
d_tag[i] = 1;
// d_drlist[ii] = dr2;
break;
}
}
}
}
void create_sample()
{
int i, grain,l;
int nx[2],ny[2],nz[2];
int aindex;
float3 *d_gr_centerp, *d_r;
float3 *rr;
// float *d_atom_mini;
int *d_atom_grain, *d_atom_neigh, *d_grain, *d_l1;
float *d_alpha ,*d_beta ,*d_gama;
bool *d_atom_id;
float max_box;
float ratio_x,ratio_y,ratio_z;
if (lx > ly) max_box = lx;
else max_box = ly;
if (lz > max_box)
max_box = lz;
ratio_x = max_box/lx;
nx[0] = int(-0.88*(lx/a)*ratio_x);
nx[1] = int(0.88*(lx/a)*ratio_x);
ratio_y = max_box/ly;
ny[0] = int(-0.88*(ly/a)*ratio_y);
ny[1] = int(0.88*(ly/a)*ratio_y);
ratio_z = max_box/lz;
nz[0] = int(-0.88*(lz/a)*ratio_z);
nz[1] = int(0.88*(lz/a)*ratio_z);
DIM.x = nx[1]-nx[0]+1;
DIM.y = ny[1]-ny[0]+1;
DIM.z = nz[1]-nz[0]+1;
printf("ratio.x = %f, ratio.y = %f, ratio.z = %f\n", ratio_x,ratio_y,ratio_z);
printf("DIM.x = %d, DIM.y = %d, DIM.z = %d\n", DIM.x, DIM.y, DIM.z);
if (DIM.x <10) DIM.x = 10;
if (DIM.y <10) DIM.y = 10;
if (DIM.z <10) DIM.z = 10;
nmax = DIM.x * DIM.y * DIM.z;
printf("Maximum atom number: %d\n", nmax);
r = new float3[nmax/5*4];
rr = new float3[nmax];
atom_id = new bool[nmax];
memset(atom_id, 0, nmax*sizeof(bool));
atom_grain = new int[nmax/5*4];
// atom_neigh = new int[nmax];
// atom_mini = new float[nmax];
clock_t begin = clock();
// CUDA memory allocation
hipMalloc(&d_r, nmax*sizeof(float3));
hipMalloc(&d_atom_id, nmax*sizeof(bool));
hipMalloc(&d_grain, 1*sizeof(int));
hipMalloc(&d_gr_centerp, numgrain*sizeof(float3));
hipMalloc(&d_alpha, numgrain*sizeof(float));
hipMalloc(&d_beta, numgrain*sizeof(float));
hipMalloc(&d_gama, numgrain*sizeof(float));
hipMalloc(&d_l1, 1*sizeof(int));
// Device initialization
hipMemcpyToSymbol(d_lx, &lx, sizeof(float));
hipMemcpyToSymbol(d_ly, &ly, sizeof(float));
hipMemcpyToSymbol(d_lz, &lz, sizeof(float));
hipMemcpyToSymbol(d_numgrain, &numgrain, sizeof(int));
hipMemcpyToSymbol(d_nmax, &nmax, sizeof(int));
hipMemcpyToSymbol(d_a, &a, sizeof(float));
hipMemcpyToSymbol(d_nx, nx, 2*sizeof(int));
hipMemcpyToSymbol(d_ny, ny, 2*sizeof(int));
hipMemcpyToSymbol(d_nz, nz, 2*sizeof(int));
hipMemcpy(d_gr_centerp, gr_centerp, numgrain*sizeof(float3), hipMemcpyHostToDevice);
hipMemcpy(d_alpha, alpha, numgrain*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_beta, beta, numgrain*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_gama, gama, numgrain*sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(d_atom_id, atom_id, nmax*sizeof(bool), hipMemcpyHostToDevice);
//if (DIM.z > 640)
//{
// printf("Warning: DIM.z is larger than 640, set DIM.z to 640");
// DIM.z = 640;
//}
dim3 blocks((DIM.x+8-1)/8, (DIM.y+8-1)/8, (DIM.z+8-1)/8);
dim3 threads(8, 8, 8);
// Initial positions
for ( grain=0; grain < numgrain; ++grain )
{
printf ("%d\n",grain);
hipMemcpy(d_grain, &grain, 1*sizeof(int), hipMemcpyHostToDevice);
//CUDA//
for (l=0;l<4;++l){
hipMemset(d_atom_id, 0, nmax*sizeof(bool));
hipMemcpy(d_l1, &l, 1*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( assign_initial_positions), dim3(blocks), dim3(threads) , 0, 0, d_gr_centerp, d_r, d_alpha, d_beta, d_gama, d_atom_id, d_grain,d_l1);
//CUDA END//
hipMemcpy(rr, d_r, nmax*sizeof(float3), hipMemcpyDeviceToHost);
hipMemcpy(atom_id, d_atom_id, nmax*sizeof(bool), hipMemcpyDeviceToHost);
for (i =0; i < nmax; ++i)
{
if (atom_id[i] == 1)
{
count = count+1;
aindex = count-1;
r[aindex].x = rr[i].x;
r[aindex].y = rr[i].y;
r[aindex].z = rr[i].z;
atom_grain[aindex] = grain;
}
}
}
}
free(rr);
natom = count;
printf ("Initial atom number: %d\n",natom);
clock_t end = clock();
float elapsed_secs = float(end - begin) / CLOCKS_PER_SEC;
cout << "Time elapsed: " << elapsed_secs << " s" << endl;
clock_t begin1 = clock();
//Clean grain boundaries
hipFree(d_r);
hipFree(d_atom_id);
hipFree(d_alpha);
hipFree(d_beta);
hipFree(d_gama);
int * d_GBlist, * GBlist;
int counter2,counter3;
bool *d_tag, *tag;
// float * d_drlist;
int GBatoms = 0;
GBlist = new int[natom];
tag = new bool[natom];
atom_neigh = new int[natom];
// atom_mini = new float[natom];
hipMalloc(&d_r, natom*sizeof(float3));
hipMalloc(&d_atom_grain, natom*sizeof(int));
hipMalloc(&d_atom_neigh, natom*sizeof(int));
// hipMalloc(&d_atom_mini, natom*sizeof(float));
hipMalloc(&d_tag, natom*sizeof(bool));
hipMemset(d_tag, 0, natom*sizeof(bool));
hipMemcpyToSymbol(d_natom, &natom, sizeof(int));
hipMemcpy(d_r, r, natom*sizeof(float3), hipMemcpyHostToDevice);
hipMemcpy(d_atom_grain, atom_grain, natom*sizeof(int), hipMemcpyHostToDevice);
dim3 blocks2((natom+32768-1)/32768, 8, 8);
dim3 threads2(8, 8, 8);
hipLaunchKernelGGL(( get_GBlist) , dim3(blocks2), dim3(threads2) , 0, 0, d_gr_centerp, d_r, d_atom_grain, d_atom_neigh, d_tag);
hipMemcpy(tag, d_tag, natom*sizeof(bool), hipMemcpyDeviceToHost);
for ( i = 0; i < natom; ++i)
{
if (tag[i] == 1)
{
++GBatoms;
GBlist[GBatoms-1] = i;
//cout << i << " " << tag[i] << endl;
}
}
printf ("GBatoms: %d\n",GBatoms);
clock_t end1 = clock();
float elapsed_secs1 = float(end1 - begin1) / CLOCKS_PER_SEC;
cout << "Time elapsed: " << elapsed_secs1 << " s" << endl;
// drlist = new float[GBatoms];
clock_t begin2 = clock();
// hipFree(d_atom_mini);
hipFree(d_gr_centerp);
hipMemset(d_tag, 0, natom*sizeof(bool));
hipMalloc(&d_GBlist, GBatoms*sizeof(int));
hipMemcpy(d_GBlist, GBlist, GBatoms*sizeof(int), hipMemcpyHostToDevice);
// hipMalloc(&d_drlist, GBatoms*sizeof(float));
// hipMemset(d_drlist, 100 , GBatoms*sizeof(float));
for (i=0;i<GBatoms;++i){tag[GBlist[i]]=0;}
// hipMemcpy(d_drlist, drlist, GBatoms*sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(drlist, d_drlist, GBatoms*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(GBlist, d_GBlist, GBatoms*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpyToSymbol(d_GBatoms, &GBatoms, sizeof(int));
// hipMemcpy(tag, d_tag, natom*sizeof(int), hipMemcpyDeviceToHost);
printf("%d %d\n",natom,GBatoms);
// for (i=0;i<GBatoms;++i){cout << tag[GBlist[i]] << " " << drlist[i] << " " << GBlist[i] << endl;}
dim3 blocks3((GBatoms+16384-1)/16384,8,8);
dim3 threads3(256);
hipLaunchKernelGGL(( clean_grain_boundaries) , dim3(blocks3), dim3(threads3) , 0, 0, d_r, d_atom_grain, d_atom_neigh, d_GBlist, d_tag);
//hipError_t error = hipGetLastError();
//printf("CUDA error: %s\n", hipGetErrorString(error));
counter2 = -1;
counter3 = 0;
// hipMemcpy(drlist, d_drlist, GBatoms*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(tag, d_tag, natom*sizeof(bool), hipMemcpyDeviceToHost);
// for (i=0;i<GBatoms;++i){cout << tag[GBlist[i]] << " " << drlist[i] << " " << GBlist[i] << endl;}
// cout << natom << endl;
for (i = 0; i < natom; ++i)
{
if (tag[i] == 0)
{
counter2 = counter2 + 1;
r[counter2].x = r[i].x;
r[counter2].y = r[i].y;
r[counter2].z = r[i].z;
atom_grain[counter2] = atom_grain[i];
// cout << i << " " << tag[i] << " " << counter2 << endl;
}
// else {counter3++;cout << i << " " << tag[i] << " " << counter2 << endl;}
else {counter3++;}
}
natom = counter2+1;
printf ("Atom number (after cleaning): %d %d\n",natom,counter3);
clock_t end2 = clock();
float elapsed_secs2 = float(end2 - begin2) / CLOCKS_PER_SEC;
cout << "Time elapsed: " << elapsed_secs2 << " s" << endl;
}
int write_output_files(char* ofn)
{
int i;
ofstream ofile;
ofile.open(ofn);
ofile << "# Position data for Cu system" << endl;
ofile << "" << endl;
ofile << natom << " atoms" << endl;
ofile << "1 atom types" << endl;
ofile << "" << endl;
ofile << "0 " << lx << " xlo xhi" << endl;
ofile << "0 " << ly << " ylo yhi" << endl;
ofile << "0 " << lz << " zlo zhi" << endl;
ofile << "" << endl;
ofile << "Masses" << endl;
ofile << "" << endl;
ofile << "1 63.55" << endl;
ofile << "" << endl;
ofile << "Atoms" << endl;
ofile << "" << endl;
for ( i = 0; i < natom; ++i)
{
ofile << i+1 << " 1 ";
ofile << setprecision(6) << r[i].x << " " << r[i].y << " " << r[i].z << endl;
}
ofile.close();
return 0;
}
void write_output_cfg(char* ofn)
{
int i;
ofstream ofile;
ofile.open(ofn);
ofile << "Number of particles = " << natom << endl;
ofile << "A = 1 Angstrom (basic length-scale)" << endl;
ofile << "H0(1,1) = " << lx << " A" << endl;
ofile << "H0(1,2) = 0 A" << endl;
ofile << "H0(1,3) = 0 A" << endl;
ofile << "H0(2,1) = 0 A" << endl;
ofile << "H0(2,2) = " << ly << " A" << endl;
ofile << "H0(2,3) = 0 A" << endl;
ofile << "H0(3,1) = 0 A" << endl;
ofile << "H0(3,2) = 0 A" << endl;
ofile << "H0(3,3) = " << lz << " A" << endl;
ofile << ".NO_VELOCITY." << endl;
ofile << "entry_count = 4" << endl;
ofile << "auxiliary[0] = grain" << endl;
ofile << "63.55" << endl;
ofile << "Cu" << endl;
for ( i = 0; i < natom; ++i)
{
ofile << setprecision(5) << r[i].x/lx << " " << r[i].y/ly << " " << r[i].z/lz << " " << atom_grain[i] << endl;
}
ofile.close();
}
int main(int argc, char* argv[])
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
printf("Number of GPU devices: %d\n", deviceCount);
clock_t begin3 = clock();
char* ofn; //output filename
char* ifn; //input filename
if (argc < 3 || strncmp(argv[1],"-h",2) == 0 || strncmp(argv[1],"--help",6) == 0)
{cout << "./ggp input output" << endl;cout << "Example: ./ggp input.txt a.out" << endl;return 0;}
ifn = argv[1];
ofn = argv[2];
read_config(ifn);
create_sample();
clock_t end3 = clock();
float elapsed_secs3 = float(end3 - begin3) / CLOCKS_PER_SEC;
cout << "Total time elapsed: " << elapsed_secs3 << " s" << endl;
cout << "Writing file..." << endl;
// write_output_files(ofn);
write_output_cfg(ofn);
cout << "Done" << endl;
return 0;
}
|
bb85fee171b7f1a3d16d1e7776b0687ad4a64a49.cu
|
// 2016 Juncheng E at PIMS.
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <iostream>
#include <iomanip>
#include <ctime>
#include <stdlib.h>
#include <string.h>
using namespace std;
int count = 0;
int nmax; // maximum atom number
int natom; // actual atom number
int numgrain; // the number of grains
int nx,ny,nz; // number of primitive cells in each direction
float a; // lattice constant
float a2; // lattice constant^2
float lx,ly,lz; // the size of the simulation cell
float *alpha, *beta, *gama;
float3 *gr_centerp; // the centers of each of the grains
float3 *r; // atom positon
float temp,mass; // temperature, mass
int *atom_grain, *atom_neigh;
bool *atom_id;
//float *atom_mini;
//float *drlist;
int3 DIM;
__constant__ float d_lx, d_ly, d_lz, d_a;
__constant__ int d_nmax, d_numgrain, d_nx[2], d_ny[2], d_nz[2], d_natom, d_GBatoms;
int read_config(char* ifn)
{
int i;
ifstream ifile;
string linebuffer;
stringstream ss;
ifile.open(ifn);
cout << "Read " << ifn << "..." << endl;
getline(ifile, linebuffer);
ss << linebuffer;
ss >> a;
a2 = a*a;
ss.str(""); // Clean up ss
ss.clear(); // Clean up ss
getline(ifile, linebuffer);
ss << linebuffer;
ss >> numgrain >> lx >> ly >> lz;
ss.str(""); // Clean up ss
ss.clear(); // Clean up ss
printf("Number of grains: %d\n",numgrain);
gr_centerp = new float3 [numgrain];
alpha = new float [numgrain];
beta = new float [numgrain];
gama = new float [numgrain];
getline(ifile, linebuffer);
ss << linebuffer;
ss >> mass >> temp;
ss.str(""); // Clean up ss
ss.clear(); // Clean up ss
for ( i = 0; i < numgrain; ++i )
{
getline(ifile, linebuffer);
ss << linebuffer;
ss >> gr_centerp[i].x >> gr_centerp[i].y >> gr_centerp[i].z >> alpha[i] >> beta[i] >> gama[i];
// alpha[i] = alpha[i]*M_PI/180.0;
// beta[i] = beta[i]*M_PI/180.0;
// gama[i] = gama[i]*M_PI/180.0;
ss.str(""); // Clean up ss
ss.clear(); // Clean up ss
}
ifile.close();
return 0;
}
__device__ float pos_PBC(float pos, float l)
// This function calculates and returns the positions of the
// atoms with periodic boundary conditions used.
{
float pos_PBC;
if (pos < (0.0))
pos_PBC = pos + l;
else if (pos > (l))
pos_PBC = pos - l;
else
pos_PBC = pos;
return pos_PBC;
}
__device__ float separation_PBC(float ds, float l)
{
float s_PBC;
if (ds > (0.5*l))
s_PBC = ds - l;
else if (ds < (-0.5*l))
s_PBC = ds + l;
else
s_PBC = ds;
return s_PBC;
}
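// Example of the minimum-image convention above: with l = 10, a raw
// difference of +9 maps to -1 and -7 maps to +3, so the returned
// separation always satisfies |ds| <= l/2.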
__device__ int getGlobalIdx_3D_3D_l(int l)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z * 4) + (threadIdx.z * (blockDim.x * blockDim.y * 4)) + (threadIdx.y * blockDim.x * 4 )+ threadIdx.x * 4 + l;
return threadId;
}
__device__ int getGlobalIdx_3D_1D() {
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_3D()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z ) + (threadIdx.z * (blockDim.x * blockDim.y )) + (threadIdx.y * blockDim.x )+ threadIdx.x ;
return threadId;
}
__device__ int getGlobalIdx_1D_1D()
{
return blockIdx.x *blockDim.x + threadIdx.x;
}
__device__ int check_position (float3 *d_gr_centerp, float x, float y, float z, int * grain)
// This function checks to see if the atom's position is
// closer to the center of the current grain than it is to
// any other grain. If so, check is assigned 1. If not, check
// is assigned 0.
{
int i, check;
float r12,r22,dx,dy,dz;
check = 1;
//check if atom is outside the outer periodic image cells
if (x >= 2.0*d_lx || x <= -d_lx)
check = 0;
else if (y >= 2.0*d_ly || y <= -d_ly)
check = 0;
else if (z >= 2.0*d_lz || z <= -d_lz)
check = 0;
if (check == 0) return 0;
dx = d_gr_centerp[*grain].x - x;
dy = d_gr_centerp[*grain].y - y;
dz = d_gr_centerp[*grain].z - z;
//check if atom is nearest to the actual grain center (and not it's image)
if (abs(dx) > 0.5*d_lx)
check = 0;
else if (abs(dy) > 0.5*d_ly)
check = 0;
else if (abs(dz) > 0.5*d_lz)
check = 0;
if (check == 0) return 0;
//check if atom is closest to current grain center
r12 = dx*dx+dy*dy+dz*dz;
for ( i = 0; i < d_numgrain; ++i)
{
if (i == *grain) continue;
dx = d_gr_centerp[i].x - x;
dy = d_gr_centerp[i].y - y;
dz = d_gr_centerp[i].z - z;
dx = separation_PBC(dx,d_lx);
dy = separation_PBC(dy,d_ly);
dz = separation_PBC(dz,d_lz);
r22 = dx*dx+dy*dy+dz*dz;
if (r22 <= r12)
{ check = 0;
break;
}
}
return check;
}
// Heavy calculation
__global__ void assign_initial_positions(float3 *d_gr_centerp, float3 *d_r, float *d_alpha, float *d_beta, float *d_gama, bool *d_atom_id,int *d_grain, int *d_l1)
{
int check,l1;
int i = d_nx[0] + threadIdx.x + blockIdx.x * blockDim.x;
int j = d_ny[0] + threadIdx.y + blockIdx.y * blockDim.y;
int k = d_nz[0] + threadIdx.z + blockIdx.z * blockDim.z;
int n1;
float x1,y1,z1,x_rot,y_rot,z_rot;
float h11,h12,h13;
float h21,h22,h23;
float h31,h32,h33;
float basis[4][3];
float phi1, phi2, phi3;
// Distribution threads here
if ( i < d_nx[1] && j < d_ny[1] && k < d_nz[1] )
{
basis[0][0]=0.00;
basis[0][1]=0.00;
basis[0][2]=0.00;
basis[1][0]=0.50;
basis[1][1]=0.50;
basis[1][2]=0.00;
basis[2][0]=0.00;
basis[2][1]=0.50;
basis[2][2]=0.50;
basis[3][0]=0.50;
basis[3][1]=0.00;
basis[3][2]=0.50;
// Tilt the grains.
phi1 = d_alpha[*d_grain]*M_PI/180.0;
phi2 = d_beta[*d_grain]*M_PI/180.0;
phi3 = d_gama[*d_grain]*M_PI/180.0;
h11=cos(phi1)*cos(phi3)-sin(phi1)*sin(phi3)*cos(phi2);
h12=sin(phi1)*cos(phi3)+cos(phi1)*sin(phi3)*cos(phi2);
h13=sin(phi3)*sin(phi2);
h21=-(cos(phi1)*sin(phi3)+sin(phi1)*cos(phi3)*cos(phi2));
h22=-sin(phi1)*sin(phi3)+cos(phi1)*cos(phi3)*cos(phi2);
h23=cos(phi3)*sin(phi2);
h31=sin(phi1)*sin(phi2);
h32=-cos(phi1)*sin(phi2);
h33=cos(phi2);
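// The matrix h above appears to be the standard z-x-z Euler-angle rotation
// built from (phi1, phi2, phi3); below it is applied as a row-vector
// product, so the lattice point (x1,y1,z1) is rotated and then shifted to
// the grain centre.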
l1=*d_l1;
n1 = getGlobalIdx_3D_3D();
x1 = i*d_a + basis[l1][0]*d_a;
y1 = j*d_a + basis[l1][1]*d_a;
z1 = k*d_a + basis[l1][2]*d_a;
x_rot = (x1*h11 + y1*h21 + z1*h31)+ d_gr_centerp[*d_grain].x;
y_rot = (x1*h12 + y1*h22 + z1*h32)+ d_gr_centerp[*d_grain].y;
z_rot = (x1*h13 + y1*h23 + z1*h33)+ d_gr_centerp[*d_grain].z;
check = check_position(d_gr_centerp, x_rot,y_rot,z_rot,d_grain);
if (check == 1)
{
d_r[n1].x = pos_PBC(x_rot,d_lx);
d_r[n1].y = pos_PBC(y_rot,d_ly);
d_r[n1].z = pos_PBC(z_rot,d_lz);
d_atom_id[n1] = 1;
}
}
//__syncthreads();
}
__global__ void get_GBlist(float3 *d_gr_centerp, float3 *d_r, int *d_atom_grain,int *d_atom_neigh, bool *d_tag)
{
int i, j, mygrain;
float dx1, dx2, dx, dy1, dy2, dy, dz1, dz2, dz, r12, r22, r32, r1, r3;
float co, projec, dis;
float d_mini;
i = getGlobalIdx_3D_3D();
if ( i < d_natom)
{
mygrain = d_atom_grain[i];
dx1 = separation_PBC(d_r[i].x - d_gr_centerp[mygrain].x,d_lx);
dy1 = separation_PBC(d_r[i].y - d_gr_centerp[mygrain].y,d_ly);
dz1 = separation_PBC(d_r[i].z - d_gr_centerp[mygrain].z,d_lz);
r12 = dx1*dx1+dy1*dy1+dz1*dz1;
r1 = sqrt(r12);
d_mini = d_a;
for ( j = 0; j < d_numgrain; ++j)
{
if ( j == mygrain ) continue;
dx = separation_PBC(d_r[i].x - d_gr_centerp[j].x,d_lx);
dy = separation_PBC(d_r[i].y - d_gr_centerp[j].y,d_ly);
dz = separation_PBC(d_r[i].z - d_gr_centerp[j].z,d_lz);
r22 = dx*dx+dy*dy+dz*dz;
dx2 = separation_PBC(d_gr_centerp[mygrain].x - d_gr_centerp[j].x,d_lx);
dy2 = separation_PBC(d_gr_centerp[mygrain].y - d_gr_centerp[j].y,d_ly);
dz2 = separation_PBC(d_gr_centerp[mygrain].z - d_gr_centerp[j].z,d_lz);
r32 = dx2*dx2+dy2*dy2+dz2*dz2;
r3 = sqrt(r32);
// Law of cosines in the triangle (atom, own grain centre, centre of grain j):
// co is the cosine of the angle at the own grain centre, projec is the
// projection of the atom's offset onto the centre-to-centre axis, and dis is
// the distance from the atom to the plane bisecting the two centres.
co = (r12+r32-r22)/2.0/r1/r3;
projec = r1*co;
dis = r3/2.0 - projec;
if (i == 0 && j == 0)
printf("d_natom = %d, dis = %f\n",d_natom,dis);
// if (dis < 0.22*d_a)
if (dis < 0.27*d_a)
{
d_tag[i] = 1;
if (dis < d_mini)
{
d_mini = dis;
d_atom_neigh[i] = j;
}
}
}
}
}
__global__ void clean_grain_boundaries(float3 *d_r, int *d_atom_grain,int *d_atom_neigh, int *d_GBlist, bool *d_tag)
{
int i, j, ii, jj;
float dx, dy, dz, dr2;
float a2 = d_a*d_a;
ii = getGlobalIdx_3D_1D();
// d_tag[d_GBlist[getGlobalIdx_1D_1D()]] = getGlobalIdx_1D_1D();
// if ( ii < d_GBatoms/10 && ii > d_GBatoms/100)
if (ii < d_GBatoms)
{
i = d_GBlist[ii];
for ( jj = ii+1; jj < d_GBatoms; ++jj)
{
j = d_GBlist[jj];
if (d_atom_neigh[i] != d_atom_grain[j] || d_atom_neigh[j] != d_atom_grain[i])
continue;
dx = d_r[i].x - d_r[j].x;
dy = d_r[i].y - d_r[j].y;
dz = d_r[i].z - d_r[j].z;
dx = separation_PBC(dx,d_lx);
dy = separation_PBC(dy,d_ly);
dz = separation_PBC(dz,d_lz);
dr2 = dx*dx+dy*dy+dz*dz;
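// Note: dr2 <= 0.215*a2 means the pair separation is below sqrt(0.215)*a
// (about 0.46 a), well under the fcc nearest-neighbour spacing a/sqrt(2)
// (about 0.71 a), so only genuinely overlapping boundary atoms get tagged.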
// if (dr2 <= 0.17*a2)
if (dr2 <= 0.215*a2)
/*if (dr2 <= 0.4761*a2)*/
{
d_tag[i] = 1;
// d_drlist[ii] = dr2;
break;
}
}
}
}
void create_sample()
{
int i, grain,l;
int nx[2],ny[2],nz[2];
int aindex;
float3 *d_gr_centerp, *d_r;
float3 *rr;
// float *d_atom_mini;
int *d_atom_grain, *d_atom_neigh, *d_grain, *d_l1;
float *d_alpha ,*d_beta ,*d_gama;
bool *d_atom_id;
float max_box;
float ratio_x,ratio_y,ratio_z;
if (lx > ly) max_box = lx;
else max_box = ly;
if (lz > max_box)
max_box = lz;
ratio_x = max_box/lx;
nx[0] = int(-0.88*(lx/a)*ratio_x);
nx[1] = int(0.88*(lx/a)*ratio_x);
ratio_y = max_box/ly;
ny[0] = int(-0.88*(ly/a)*ratio_y);
ny[1] = int(0.88*(ly/a)*ratio_y);
ratio_z = max_box/lz;
nz[0] = int(-0.88*(lz/a)*ratio_z);
nz[1] = int(0.88*(lz/a)*ratio_z);
DIM.x = nx[1]-nx[0]+1;
DIM.y = ny[1]-ny[0]+1;
DIM.z = nz[1]-nz[0]+1;
printf("ratio.x = %f, ratio.y = %f, ratio.z = %f\n", ratio_x,ratio_y,ratio_z);
printf("DIM.x = %d, DIM.y = %d, DIM.z = %d\n", DIM.x, DIM.y, DIM.z);
if (DIM.x <10) DIM.x = 10;
if (DIM.y <10) DIM.y = 10;
if (DIM.z <10) DIM.z = 10;
nmax = DIM.x * DIM.y * DIM.z;
printf("Maximum atom number: %d\n", nmax);
r = new float3[nmax/5*4];
rr = new float3[nmax];
atom_id = new bool[nmax];
memset(atom_id, 0, nmax*sizeof(bool));
atom_grain = new int[nmax/5*4];
// atom_neigh = new int[nmax];
// atom_mini = new float[nmax];
clock_t begin = clock();
// CUDA memory allocation
cudaMalloc(&d_r, nmax*sizeof(float3));
cudaMalloc(&d_atom_id, nmax*sizeof(bool));
cudaMalloc(&d_grain, 1*sizeof(int));
cudaMalloc(&d_gr_centerp, numgrain*sizeof(float3));
cudaMalloc(&d_alpha, numgrain*sizeof(float));
cudaMalloc(&d_beta, numgrain*sizeof(float));
cudaMalloc(&d_gama, numgrain*sizeof(float));
cudaMalloc(&d_l1, 1*sizeof(int));
// Device initialization
cudaMemcpyToSymbol(d_lx, &lx, sizeof(float));
cudaMemcpyToSymbol(d_ly, &ly, sizeof(float));
cudaMemcpyToSymbol(d_lz, &lz, sizeof(float));
cudaMemcpyToSymbol(d_numgrain, &numgrain, sizeof(int));
cudaMemcpyToSymbol(d_nmax, &nmax, sizeof(int));
cudaMemcpyToSymbol(d_a, &a, sizeof(float));
cudaMemcpyToSymbol(d_nx, nx, 2*sizeof(int));
cudaMemcpyToSymbol(d_ny, ny, 2*sizeof(int));
cudaMemcpyToSymbol(d_nz, nz, 2*sizeof(int));
cudaMemcpy(d_gr_centerp, gr_centerp, numgrain*sizeof(float3), cudaMemcpyHostToDevice);
cudaMemcpy(d_alpha, alpha, numgrain*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_beta, beta, numgrain*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_gama, gama, numgrain*sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_atom_id, atom_id, nmax*sizeof(bool), cudaMemcpyHostToDevice);
//if (DIM.z > 640)
//{
// printf("Warning: DIM.z is larger than 640, set DIM.z to 640");
// DIM.z = 640;
//}
dim3 blocks((DIM.x+8-1)/8, (DIM.y+8-1)/8, (DIM.z+8-1)/8);
dim3 threads(8, 8, 8);
// Initial positions
for ( grain=0; grain < numgrain; ++grain )
{
printf ("%d\n",grain);
cudaMemcpy(d_grain, &grain, 1*sizeof(int), cudaMemcpyHostToDevice);
//CUDA//
for (l=0;l<4;++l){
cudaMemset(d_atom_id, 0, nmax*sizeof(bool));
cudaMemcpy(d_l1, &l, 1*sizeof(int), cudaMemcpyHostToDevice);
assign_initial_positions<<< blocks, threads >>>(d_gr_centerp, d_r, d_alpha, d_beta, d_gama, d_atom_id, d_grain,d_l1);
//CUDA END//
cudaMemcpy(rr, d_r, nmax*sizeof(float3), cudaMemcpyDeviceToHost);
cudaMemcpy(atom_id, d_atom_id, nmax*sizeof(bool), cudaMemcpyDeviceToHost);
for (i =0; i < nmax; ++i)
{
if (atom_id[i] == 1)
{
count = count+1;
aindex = count-1;
r[aindex].x = rr[i].x;
r[aindex].y = rr[i].y;
r[aindex].z = rr[i].z;
atom_grain[aindex] = grain;
}
}
}
}
free(rr);
natom = count;
printf ("Initial atom number: %d\n",natom);
clock_t end = clock();
float elapsed_secs = float(end - begin) / CLOCKS_PER_SEC;
cout << "Time elapsed: " << elapsed_secs << " s" << endl;
clock_t begin1 = clock();
//Clean grain boundaries
cudaFree(d_r);
cudaFree(d_atom_id);
cudaFree(d_alpha);
cudaFree(d_beta);
cudaFree(d_gama);
int * d_GBlist, * GBlist;
int counter2,counter3;
bool *d_tag, *tag;
// float * d_drlist;
int GBatoms = 0;
GBlist = new int[natom];
tag = new bool[natom];
atom_neigh = new int[natom];
// atom_mini = new float[natom];
cudaMalloc(&d_r, natom*sizeof(float3));
cudaMalloc(&d_atom_grain, natom*sizeof(int));
cudaMalloc(&d_atom_neigh, natom*sizeof(int));
// cudaMalloc(&d_atom_mini, natom*sizeof(float));
cudaMalloc(&d_tag, natom*sizeof(bool));
cudaMemset(d_tag, 0, natom*sizeof(bool));
cudaMemcpyToSymbol(d_natom, &natom, sizeof(int));
cudaMemcpy(d_r, r, natom*sizeof(float3), cudaMemcpyHostToDevice);
cudaMemcpy(d_atom_grain, atom_grain, natom*sizeof(int), cudaMemcpyHostToDevice);
dim3 blocks2((natom+32768-1)/32768, 8, 8);
dim3 threads2(8, 8, 8);
get_GBlist <<< blocks2, threads2 >>> (d_gr_centerp, d_r, d_atom_grain, d_atom_neigh, d_tag);
cudaMemcpy(tag, d_tag, natom*sizeof(bool), cudaMemcpyDeviceToHost);
for ( i = 0; i < natom; ++i)
{
if (tag[i] == 1)
{
++GBatoms;
GBlist[GBatoms-1] = i;
//cout << i << " " << tag[i] << endl;
}
}
printf ("GBatoms: %d\n",GBatoms);
clock_t end1 = clock();
float elapsed_secs1 = float(end1 - begin1) / CLOCKS_PER_SEC;
cout << "Time elapsed: " << elapsed_secs1 << " s" << endl;
// drlist = new float[GBatoms];
clock_t begin2 = clock();
// cudaFree(d_atom_mini);
cudaFree(d_gr_centerp);
cudaMemset(d_tag, 0, natom*sizeof(bool));
cudaMalloc(&d_GBlist, GBatoms*sizeof(int));
cudaMemcpy(d_GBlist, GBlist, GBatoms*sizeof(int), cudaMemcpyHostToDevice);
// cudaMalloc(&d_drlist, GBatoms*sizeof(float));
// cudaMemset(d_drlist, 100 , GBatoms*sizeof(float));
for (i=0;i<GBatoms;++i){tag[GBlist[i]]=0;}
// cudaMemcpy(d_drlist, drlist, GBatoms*sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(drlist, d_drlist, GBatoms*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(GBlist, d_GBlist, GBatoms*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpyToSymbol(d_GBatoms, &GBatoms, sizeof(int));
// cudaMemcpy(tag, d_tag, natom*sizeof(int), cudaMemcpyDeviceToHost);
printf("%d %d\n",natom,GBatoms);
// for (i=0;i<GBatoms;++i){cout << tag[GBlist[i]] << " " << drlist[i] << " " << GBlist[i] << endl;}
dim3 blocks3((GBatoms+16384-1)/16384,8,8);
dim3 threads3(256);
clean_grain_boundaries <<< blocks3, threads3 >>> (d_r, d_atom_grain, d_atom_neigh, d_GBlist, d_tag);
//cudaError_t error = cudaGetLastError();
//printf("CUDA error: %s\n", cudaGetErrorString(error));
counter2 = -1;
counter3 = 0;
// cudaMemcpy(drlist, d_drlist, GBatoms*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(tag, d_tag, natom*sizeof(bool), cudaMemcpyDeviceToHost);
// for (i=0;i<GBatoms;++i){cout << tag[GBlist[i]] << " " << drlist[i] << " " << GBlist[i] << endl;}
// cout << natom << endl;
for (i = 0; i < natom; ++i)
{
if (tag[i] == 0)
{
counter2 = counter2 + 1;
r[counter2].x = r[i].x;
r[counter2].y = r[i].y;
r[counter2].z = r[i].z;
atom_grain[counter2] = atom_grain[i];
// cout << i << " " << tag[i] << " " << counter2 << endl;
}
// else {counter3++;cout << i << " " << tag[i] << " " << counter2 << endl;}
else {counter3++;}
}
natom = counter2+1;
printf ("Atom number (after cleaning): %d %d\n",natom,counter3);
clock_t end2 = clock();
float elapsed_secs2 = float(end2 - begin2) / CLOCKS_PER_SEC;
cout << "Time elapsed: " << elapsed_secs2 << " s" << endl;
}
int write_output_files(char* ofn)
{
int i;
ofstream ofile;
ofile.open(ofn);
ofile << "# Position data for Cu system" << endl;
ofile << "" << endl;
ofile << natom << " atoms" << endl;
ofile << "1 atom types" << endl;
ofile << "" << endl;
ofile << "0 " << lx << " xlo xhi" << endl;
ofile << "0 " << ly << " ylo yhi" << endl;
ofile << "0 " << lz << " zlo zhi" << endl;
ofile << "" << endl;
ofile << "Masses" << endl;
ofile << "" << endl;
ofile << "1 63.55" << endl;
ofile << "" << endl;
ofile << "Atoms" << endl;
ofile << "" << endl;
for ( i = 0; i < natom; ++i)
{
ofile << i+1 << " 1 ";
ofile << setprecision(6) << r[i].x << " " << r[i].y << " " << r[i].z << endl;
}
ofile.close();
return 0;
}
void write_output_cfg(char* ofn)
{
int i;
ofstream ofile;
ofile.open(ofn);
ofile << "Number of particles = " << natom << endl;
ofile << "A = 1 Angstrom (basic length-scale)" << endl;
ofile << "H0(1,1) = " << lx << " A" << endl;
ofile << "H0(1,2) = 0 A" << endl;
ofile << "H0(1,3) = 0 A" << endl;
ofile << "H0(2,1) = 0 A" << endl;
ofile << "H0(2,2) = " << ly << " A" << endl;
ofile << "H0(2,3) = 0 A" << endl;
ofile << "H0(3,1) = 0 A" << endl;
ofile << "H0(3,2) = 0 A" << endl;
ofile << "H0(3,3) = " << lz << " A" << endl;
ofile << ".NO_VELOCITY." << endl;
ofile << "entry_count = 4" << endl;
ofile << "auxiliary[0] = grain" << endl;
ofile << "63.55" << endl;
ofile << "Cu" << endl;
for ( i = 0; i < natom; ++i)
{
ofile << setprecision(5) << r[i].x/lx << " " << r[i].y/ly << " " << r[i].z/lz << " " << atom_grain[i] << endl;
}
ofile.close();
}
int main(int argc, char* argv[])
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
printf("Number of GPU devices: %d\n", deviceCount);
clock_t begin3 = clock();
char* ofn; //output filename
char* ifn; //input filename
if (argc < 3 || strncmp(argv[1],"-h",2) == 0 || strncmp(argv[1],"--help",6) == 0)
{cout << "./ggp input output" << endl;cout << "Example: ./ggp input.txt a.out" << endl;return 0;}
ifn = argv[1];
ofn = argv[2];
read_config(ifn);
create_sample();
clock_t end3 = clock();
float elapsed_secs3 = float(end3 - begin3) / CLOCKS_PER_SEC;
cout << "Total time elapsed: " << elapsed_secs3 << " s" << endl;
cout << "Writing file..." << endl;
// write_output_files(ofn);
write_output_cfg(ofn);
cout << "Done" << endl;
return 0;
}
|
fa5ae85fb7ff7254a64c37bd44fdca7b87d707f6.hip
|
// !!! This is a file automatically generated by hipify!!!
/***********************************************************
By Huahua Wang, the University of Minnesota, twin cities
***********************************************************/
#include <stdio.h>
#include "badmm_kernel.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "rocblas.h"
#include <math.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cusolverDn.h>
//#define MAX_GRID_SIZE 65535
//#define MAX_BLOCK_SIZE 1024
typedef struct GPUInfo
{
unsigned int MAX_GRID_SIZE;
unsigned int MAX_BLOCK_SIZE;
}GPUInfo;
typedef struct ADMM_para
{
float rho; // penalty parameter
float* iter_obj;
float* iter_time;
float* iter_err;
unsigned int MAX_ITER; // MAX_ITER
float tol;
float abstol;
float reltol;
float lambda;
}ADMM_para;
typedef struct BADMM_massTrans
{
int m;
int n;
int N;
// we'll call our matrix A
float* A; // row major order
float* a;
float* b;
float g2;
float g3;
int print_step;
bool SAVEFILE;
}BADMM_massTrans;
void matInit(float* &X, unsigned int size, float value);
/*
 * Thrust update functions for transformations
*/
struct B_update {
const float N_loc;
B_update(float _N_loc) : N_loc(_N_loc) {}
__host__ __device__
float operator()(thrust::tuple<float,float,float,float,float> t) {
float x1, x2, x3, a, u;
thrust::tie(x1, x2, x3, a, u) = t;
return (((x1 + x2 + x3) / 3.0f) - (a / N_loc) + u);
}
};
struct X1_update {
const float lamb;
X1_update(float _lamb) : lamb(_lamb) {}
__host__ __device__
float operator()(const float &x1, const float &u) {
return ((1.0f / (1.0f + lamb)) * (x1 - u));
}
};
struct X2_update {
const float temp;
X2_update(float _temp) : temp(_temp) {}
__host__ __device__
float operator()(const float &x2, const float &u) {
float v = x2 - u;
float ans = (v - temp > 0.0f? v - temp : 0.0f) - (-v - temp > 0.0f? -v - temp : 0.0f);
return ans;
}
};
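// In ADMM terms (cf. the "admm boyd" reference in the loop below):
// X1_update is the prox of the squared Frobenius norm, (x - u)/(1 + lambda);
// X2_update is elementwise soft-thresholding at `temp`, i.e. the prox of
// temp*||.||_1; B_update forms the averaged residual avg(X1,X2,X3) - A/N + U
// shared by all three block updates.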
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*********************************************
Bregman ADMM for mass transportation problem
All matrices are in row major order
**********************************************/
void gpuBADMM_MT( BADMM_massTrans* &badmm_mt, ADMM_para* &badmm_para, GPUInfo* gpu_info)
{
float *X_1, *X_2, *X_3, *U, *B; // host (boyd code)
// device
float *d_A, *d_X_1, *d_X_2, *d_X_3, *d_U;
float *d_z, *d_z_old, *d_Xmean, *d_X;
float *d_svd_U, *d_svd_S, *d_svd_VH, *d_temp ;
unsigned int m,n,N;
m = badmm_mt->m;
n = badmm_mt->n;
N = badmm_mt->N;
unsigned long int size = m*n;
float fill_value = 0.0f;
// local variables below for updates
// set g2_max = norm(A(:), inf);
// set g3_max = norm(A);
// THEN UNCOMMENT
// badmm_mt->g2 = 0.15 * g2_max;
// badmm_mt->g3 = 0.15 * g3_max;
// Let's hard code correct values from boyd for now
badmm_mt->g2 = 0.14999999999;
badmm_mt->g3 = 206.356410537;
// GPU matrix
hipMalloc(&d_X_1, size*sizeof(float));
hipMalloc(&d_X_2, size*sizeof(float));
hipMalloc(&d_X_3, size*sizeof(float));
hipMalloc(&d_A, size*sizeof(float));
hipMalloc(&d_U, size*sizeof(float));
hipMalloc(&d_svd_U, m*m*sizeof(float));
hipMalloc(&d_svd_S, n*sizeof(float)); // size of min(m,n); cublasgesvd works for m>n
hipMalloc(&d_svd_VH, n*n*sizeof(float));
hipMalloc(&d_temp, size*sizeof(float));
printf("Copying data from CPU to GPU ...\n");
// copy A to GPU
hipMemcpy(d_A, badmm_mt->A, sizeof(float)*size, hipMemcpyHostToDevice);
// direct device allocation
// this should be done on device kernel directly.
thrust::device_ptr<float> dp_X1(d_X_1);
thrust::device_ptr<float> dp_X2(d_X_2);
thrust::device_ptr<float> dp_X3(d_X_3);
thrust::device_ptr<float> dp_A(d_A);
thrust::device_ptr<float> dp_U(d_U);
// don't know if this initialization is compulsory.
thrust::fill(dp_X1, dp_X1 + size, fill_value);
thrust::fill(dp_X2, dp_X2 + size, fill_value);
thrust::fill(dp_X3, dp_X3 + size, fill_value);
thrust::fill(dp_U, dp_U + size, fill_value);
// if necessary add SVD matrices initialization here.
// grid and block size
unsigned int block_size = size > gpu_info->MAX_BLOCK_SIZE ? gpu_info->MAX_BLOCK_SIZE : size;
unsigned long int n_blocks = (int) (size+block_size-1)/block_size;
if(n_blocks > gpu_info->MAX_GRID_SIZE) n_blocks = gpu_info->MAX_GRID_SIZE;
printf("Block size %f b_blocks %f\n", block_size, n_blocks);
unsigned int stride = block_size*n_blocks;
printf("nblcoks = %d, block_size = %d, size = %d, stride = %d\n", n_blocks, block_size, size, stride);
printf("BregmanADMM for mass transportation is running ...\n");
hipblasInit();
float iter_obj;
int iter, count = 0;
// GPU time
float milliseconds = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
for ( iter = 0; iter < badmm_para->MAX_ITER; iter++ )
{
// update B here as in admm boyd - B and U are the same - we'll use U for both
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(dp_X1, dp_X2, dp_X3, dp_A, dp_U)),
thrust::make_zip_iterator(thrust::make_tuple(dp_X1+size, dp_X2+size, dp_X3+size, dp_A+size, dp_U+size)),
dp_U,
B_update(N)
);
// X_1 update
thrust::transform(
dp_X1, dp_X1 + size, dp_U, dp_X1, X1_update(badmm_para->lambda)
);
// X_2 update
float temp = badmm_para->lambda * badmm_mt->g2;
thrust::transform(
dp_X2, dp_X2 + size, dp_U, dp_X2, X2_update(temp)
);
// X_3 update
// perform X_3 - B and store in X_3
thrust::transform(
dp_X3, dp_X3 + size, dp_U, dp_X3, thrust::minus<float>()
);
// svd code coming in
// --- CUDA solver initialization
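// Note: devInfo, the solver handle and the `work` buffer below are recreated
// on every iteration, and devInfo/work are never freed, which leaks device
// memory; creating them once before the loop and freeing them afterwards
// would avoid this.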
int *devInfo;
//can do a gpuErrchk on all the cuda Mallocs
hipMalloc(&devInfo, sizeof(int));
cusolverStatus_t stat;
hipsolverDnHandle_t solver_handle;
hipsolverDnCreate(&solver_handle);
int work_size = 0;
stat = hipsolverDnSgesvd_bufferSize(solver_handle, m, n, &work_size);
if(stat != CUSOLVER_STATUS_SUCCESS ) std::cout << "Initialization of cuSolver failed. \n";
float *work;
gpuErrchk(hipMalloc(&work, work_size * sizeof(float)));
// --- CUDA SVD execution
stat = hipsolverDnSgesvd(solver_handle, 'A', 'A', m, n, d_X_3, m, d_svd_S, d_svd_U, m, d_svd_VH, n, work, work_size, NULL, devInfo);
hipDeviceSynchronize();
int devInfo_h = 0;
gpuErrchk(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost));
// std::cout << "devInfo = " << devInfo_h << "\n";
switch(stat){
case CUSOLVER_STATUS_SUCCESS: std::cout << "SVD computation success\n"; break;
case CUSOLVER_STATUS_NOT_INITIALIZED: std::cout << "Library cuSolver not initialized correctly\n"; break;
case CUSOLVER_STATUS_INVALID_VALUE: std::cout << "Invalid parameters passed\n"; break;
case CUSOLVER_STATUS_INTERNAL_ERROR: std::cout << "Internal operation failed\n"; break;
}
// if (devInfo_h == 0 && stat == CUSOLVER_STATUS_SUCCESS) std::cout << "SVD successful\n\n";
// --- Moving the results from device to host
// hipMemcpy(h_S, d_S, N * sizeof(float), hipMemcpyDeviceToHost);
// for(int i = 0; i < N; i++) std::cout << "d_S["<<i<<"] = " << h_S[i] << std::endl;
hipsolverDnDestroy(solver_handle);
// X3 update here
// first calculate the prox_l1 // declare B as NULL here.
// reusing prox_l1 here
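// The intended X3 step (see the non-compiled sketch below) is singular-value
// thresholding: soft-threshold the singular values in d_svd_S by lambda*g3
// (reusing the X2/prox_l1 style update) and reconstruct
// X3 = U * diag(S) * VH from d_svd_U, d_svd_S and d_svd_VH.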
/* DO NOT DELETE - THIS IS NOT OLD CODE - JUST NEW CODE NOT READY TO RUN YET
// **************************************************************
// ANYTHING NOT READY TO COMPILE IS BELOW; EVERYTHING ABOVE SHOULD COMPILE AND RUN
// KEEP MOVING THIS START OF COMMENT TO TEST MORE STUFF FOR COMPILATION
X2_update<<<n_blocks, block_size>>>(d_svd_S, NULL, badmm_para->lambda * badmm_para->g3, size );
// TODO- this has to be updated
X3_update<<<n_blocks, block_size>>>(d_X_3, d_svd_U, d_svd_S, d_svd_VH, size);
// Concat all the X_i's to X for termination checks
concat_X<<<n_blocks, block_size>>>(d_X, d_X_1, d_X_2, d_X_3, badmm_mt->N, size);
hipblasScopy(size, d_z_old, 1, d_z, 1);
// % diagnostics, reporting, termination checks
// matlab code continue here.
// // matrix-vector multiplication, probably not required.
// hipblasSgemv( 'T',n,m, 1.0,d_X,n,col_ones,1, 0,d_rowSum,1); // fortran, column-major
// if (badmm_mt->a)
// rowNorm_a<<<n_blocks,block_size>>>(d_X, d_rowSum, d_a, size, n);
// else
// rowNorm<<<n_blocks,block_size>>>(d_X, d_rowSum, size, n);
// Z update
// this line also uses average of the three Xis,
// change this to cuda code
// z = x + repmat(-avg(X_1, X_2, X_3) + A./N, 1, N);
//zexp<<<n_blocks,block_size>>>( d_Z, d_X, d_Y, size);
// U - update
hipblasScopy(size, d_U, 1, d_B, 1);
// matrix vector multiplication
hipblasSgemv('N',n,m, 1.0,d_Z,n,row_ones,1, 0.0,d_colSum,1);
if (badmm_mt->b)
colNorm_b<<<n_blocks,block_size>>>(d_Z, d_colSum, d_b, size, n);
else
colNorm<<<n_blocks,block_size>>>(d_Z, d_colSum, size, n);
// dual update
dual<<<n_blocks,block_size>>>( d_Yerr, d_Y, d_X, d_Z, size);
// check stopping conditions
dev_ptr = thrust::device_pointer_cast(d_X);
dev_ptr1 = thrust::device_pointer_cast(d_Xold);
Xerr = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(dev_ptr, dev_ptr1)), thrust::make_zip_iterator(thrust::make_tuple(dev_ptr+size, dev_ptr1+size)), zdiffsq(), 0.0f, thrust::plus<float>());
dev_ptr = thrust::device_pointer_cast(d_X);
// for relative err condition
// iternorm = thrust::inner_product(dev_ptr, dev_ptr+size, dev_ptr, 0.0f);
// Xerr = sqrt(Xerr/iternorm);
dev_ptr = thrust::device_pointer_cast(d_Yerr);
Yerr = thrust::reduce(dev_ptr, dev_ptr+stride);
dev_ptr = thrust::device_pointer_cast(d_Y);
// for relative err condition
// iternorm = thrust::inner_product(dev_ptr, dev_ptr+size, dev_ptr, 0.0f);
// Yerr = sqrt(Yerr/iternorm);
if ( Yerr < badmmpara->tol && Xerr < badmmpara->tol ) {
break;
}
if( badmm_mt->print_step && !((iter+1)%badmm_mt->print_step) )
{
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
// calculate primal objective value
dev_ptr = thrust::device_pointer_cast(d_Z);
iter_obj = thrust::inner_product(d_Cptr, d_Cptr+size, dev_ptr, 0.0f);
badmmpara->iter_time[count] = milliseconds;
badmmpara->iter_err[count] = Xerr + Yerr;
badmmpara->iter_obj[count] = iter_obj * (-badmmpara->rho);
count++;
printf("iter = %d, objval = %f, primal_err = %f, dual_err = %f, time = %f\n", iter, iter_obj * (-badmmpara->rho), Xerr, Yerr, milliseconds);
}
// *******************************************************
// NON COMPILED BLOCK ENDS HERE
*/
}
// calculate primal objective value
// dev_ptr = thrust::device_pointer_cast(d_Z);
// iter_obj = thrust::inner_product(d_Cptr, d_Cptr+size, dev_ptr, 0.0f);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
// average X+Z
// hipblasSaxpy (size, 1, d_Z, 1, d_X, 1);
// hipblasSscal( size, 0.5, d_X, 1);
/*
hipStream_t stream;
hipStreamCreate(&stream);
hipMemcpyAsync(X, d_X, sizeof(float)*size, hipMemcpyDeviceToHost,stream);
badmmpara->iter_err[count] = Xerr + Yerr;
badmmpara->iter_time[count] = milliseconds;
badmmpara->iter_obj[count] = iter_obj * (-badmmpara->rho);
printf("iter = %d, objval = %f, Xerr = %f, Yerr = %f, milliseconds:%f\n", iter, iter_obj * (-badmmpara->rho), Xerr, Yerr, milliseconds);
if (badmm_mt->SAVEFILE)
{
char filename[40];
FILE *f;
sprintf(filename, "X_out.dat");
f = fopen(filename, "wb");
fwrite (X,sizeof(float),size,f);
fclose(f);
}
hipFree(d_X);
delete[]X;
*/
hipDeviceReset();
}
int main(const int argc, const char **argv)
{
BADMM_massTrans* badmm_mt = NULL;
badmm_mt = (struct BADMM_massTrans *) malloc( sizeof(struct BADMM_massTrans) );
badmm_mt->print_step = 0; // default: not print
badmm_mt->SAVEFILE = 1; // default: save
// we'll call it A
badmm_mt->A = NULL;
badmm_mt->a = NULL;
badmm_mt->b = NULL;
long size;
int Asize[2];
unsigned int dim = 0; // initialized so the filename below is well-defined if no argument is given
// dim = 1;
// dim = 5;
// dim = 10;
// dim = 15;
char* str;
if ( argc > 1 ) dim = strtol(argv[1],&str,10);
// dim = dim*1024;
// read file
char filename[40];
FILE *f;
// read A
sprintf(filename, "%dC.dat",dim);
printf("%s", filename);
f = fopen ( filename , "rb" );
if ( f == NULL ) {
printf("Error! Can not find C file!");
return 0;
}
fread(Asize,sizeof(int),2, f);
badmm_mt->m = Asize[0];
badmm_mt->n = Asize[1];
badmm_mt->N = 3;
size = badmm_mt->m*badmm_mt->n;
badmm_mt->A = new float[size];
fread (badmm_mt->A,sizeof(float),size,f);
fclose(f);
printf("Cost Matrix C: rows = %d, cols = %d, total size = %d\n", badmm_mt->m, badmm_mt->n, size);
// DONT NEED FOR RPCA
// DELETE ALL
// read a
sprintf(filename, "%da.dat",dim);
f = fopen ( filename , "rb" );
if ( f != NULL )
{
badmm_mt->a = new float[badmm_mt->m];
fread (badmm_mt->a,sizeof(float),badmm_mt->m,f);
fclose(f);
}
// read b
sprintf(filename, "%db.dat",dim);
f = fopen ( filename , "rb" );
if ( f != NULL )
{
badmm_mt->b = new float[badmm_mt->n];
fread (badmm_mt->b,sizeof(float),badmm_mt->n,f);
fclose(f);
}
// UNTIL HERE
int iter_size;
ADMM_para* badmm_para = NULL;
badmm_para = (struct ADMM_para *) malloc( sizeof(struct ADMM_para) );
// default value
badmm_para->lambda = 1;
badmm_para->rho = 1.0 / badmm_para->lambda;
badmm_para->MAX_ITER = 100;
badmm_para->tol = 1e-4;
badmm_para->abstol = 1e-4;
badmm_para->reltol = 1e-2;
if ( argc > 2 ) badmm_para->rho = strtod(argv[2],&str);
if ( argc > 3 ) badmm_para->MAX_ITER = strtol(argv[3],&str,10);
if ( argc > 4 ) badmm_para->tol = strtod(argv[4],&str);
if ( argc > 5 ) badmm_mt->print_step = strtol(argv[5],&str,10);
if ( argc > 6 ) badmm_mt->SAVEFILE = strtol(argv[6],&str,10);
if ( badmm_para->rho == 0.0 ) badmm_para->rho = 0.001;
if ( badmm_para->MAX_ITER == 0 ) badmm_para->MAX_ITER = 2000;
if ( badmm_para->tol == 0.0 ) badmm_para->tol = 1e-4;
iter_size = 1;
if(badmm_mt->print_step)
{
iter_size = (int)badmm_para->MAX_ITER/badmm_mt->print_step + 1;
}
badmm_para->iter_obj = new float[iter_size];
badmm_para->iter_time = new float[iter_size];
badmm_para->iter_err = new float[iter_size];
printf("Please be patient! Getting GPU information is slow .....\n");
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,0); // default device
GPUInfo gpu_info;
gpu_info.MAX_GRID_SIZE = prop.maxGridSize[0];
gpu_info.MAX_BLOCK_SIZE = prop.maxThreadsPerBlock;
// if out of GPU memory, return
float mem = (size*5*4+(badmm_mt->m+badmm_mt->n)*3*4+gpu_info.MAX_GRID_SIZE*gpu_info.MAX_BLOCK_SIZE*2*4)/pow(2,30);
float GPUmem = (long)prop.totalGlobalMem/pow(2,30);
printf("gridDim = %d, blockDim = %d, memory required = %fGB, GPU memory = %fGB\n", gpu_info.MAX_GRID_SIZE, gpu_info.MAX_BLOCK_SIZE, mem, GPUmem );
if ( GPUmem < mem )
{
printf("Not enough memory on GPU to solve the problem !\n");
return 0;
}
printf("rho = %f, Max_Iteration = %d, tol = %f, print every %d steps, save result: %d\n", badmm_para->rho, badmm_para->MAX_ITER, badmm_para->tol, badmm_mt->print_step, badmm_mt->SAVEFILE);
gpuBADMM_MT( badmm_mt, badmm_para, &gpu_info);
delete[]badmm_para->iter_err;
delete[]badmm_para->iter_obj;
delete[]badmm_para->iter_time;
free(badmm_para);
if(badmm_mt->A)delete[]badmm_mt->A;
if(badmm_mt->a)delete[]badmm_mt->a;
if(badmm_mt->b)delete[]badmm_mt->b;
free(badmm_mt);
}
void matInit(float* &X, unsigned int size, float value)
{
for ( int i = 0 ; i < size ; i++ )
X[i] = value;
}
|
fa5ae85fb7ff7254a64c37bd44fdca7b87d707f6.cu
|
/***********************************************************
By Huahua Wang, the University of Minnesota, twin cities
***********************************************************/
#include <stdio.h>
#include "badmm_kernel.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "cublas.h"
#include <math.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
//#define MAX_GRID_SIZE 65535
//#define MAX_BLOCK_SIZE 1024
typedef struct GPUInfo
{
unsigned int MAX_GRID_SIZE;
unsigned int MAX_BLOCK_SIZE;
}GPUInfo;
typedef struct ADMM_para
{
float rho; // penalty parameter
float* iter_obj;
float* iter_time;
float* iter_err;
unsigned int MAX_ITER; // MAX_ITER
float tol;
float abstol;
float reltol;
float lambda;
}ADMM_para;
typedef struct BADMM_massTrans
{
int m;
int n;
int N;
// we'll call our matrix A
float* A; // row major order
float* a;
float* b;
float g2;
float g3;
int print_step;
bool SAVEFILE;
}BADMM_massTrans;
void matInit(float* &X, unsigned int size, float value);
/*
 * Thrust update functions for transformations
*/
struct B_update {
const float N_loc;
B_update(float _N_loc) : N_loc(_N_loc) {}
__host__ __device__
float operator()(thrust::tuple<float,float,float,float,float> t) {
float x1, x2, x3, a, u;
thrust::tie(x1, x2, x3, a, u) = t;
return (((x1 + x2 + x3) / 3.0f) - (a / N_loc) + u);
}
};
struct X1_update {
const float lamb;
X1_update(float _lamb) : lamb(_lamb) {}
__host__ __device__
float operator()(const float &x1, const float &u) {
return ((1.0f / (1.0f + lamb)) * (x1 - u));
}
};
struct X2_update {
const float temp;
X2_update(float _temp) : temp(_temp) {}
__host__ __device__
float operator()(const float &x2, const float &u) {
float v = x2 - u;
float ans = (v - temp > 0.0f? v - temp : 0.0f) - (-v - temp > 0.0f? -v - temp : 0.0f);
return ans;
}
};
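// In ADMM terms (cf. the "admm boyd" reference in the loop below):
// X1_update is the prox of the squared Frobenius norm, (x - u)/(1 + lambda);
// X2_update is elementwise soft-thresholding at `temp`, i.e. the prox of
// temp*||.||_1; B_update forms the averaged residual avg(X1,X2,X3) - A/N + U
// shared by all three block updates.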
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*********************************************
Bregman ADMM for mass transportation problem
All matrices are in row major order
**********************************************/
void gpuBADMM_MT( BADMM_massTrans* &badmm_mt, ADMM_para* &badmm_para, GPUInfo* gpu_info)
{
float *X_1, *X_2, *X_3, *U, *B; // host (boyd code)
// device
float *d_A, *d_X_1, *d_X_2, *d_X_3, *d_U;
float *d_z, *d_z_old, *d_Xmean, *d_X;
float *d_svd_U, *d_svd_S, *d_svd_VH, *d_temp ;
unsigned int m,n,N;
m = badmm_mt->m;
n = badmm_mt->n;
N = badmm_mt->N;
unsigned long int size = m*n;
float fill_value = 0.0f;
// local variables below for updates
// set g2_max = norm(A(:), inf);
// set g3_max = norm(A);
// THEN UNCOMMENT
// badmm_mt->g2 = 0.15 * g2_max;
// badmm_mt->g3 = 0.15 * g3_max;
// Let's hard code correct values from boyd for now
badmm_mt->g2 = 0.14999999999;
badmm_mt->g3 = 206.356410537;
// GPU matrix
cudaMalloc(&d_X_1, size*sizeof(float));
cudaMalloc(&d_X_2, size*sizeof(float));
cudaMalloc(&d_X_3, size*sizeof(float));
cudaMalloc(&d_A, size*sizeof(float));
cudaMalloc(&d_U, size*sizeof(float));
cudaMalloc(&d_svd_U, m*m*sizeof(float));
cudaMalloc(&d_svd_S, n*sizeof(float)); // size of min(m,n); cublasgesvd works for m>n
cudaMalloc(&d_svd_VH, n*n*sizeof(float));
cudaMalloc(&d_temp, size*sizeof(float));
printf("Copying data from CPU to GPU ...\n");
// copy A to GPU
cudaMemcpy(d_A, badmm_mt->A, sizeof(float)*size, cudaMemcpyHostToDevice);
// direct device allocation
// this should be done on device kernel directly.
thrust::device_ptr<float> dp_X1(d_X_1);
thrust::device_ptr<float> dp_X2(d_X_2);
thrust::device_ptr<float> dp_X3(d_X_3);
thrust::device_ptr<float> dp_A(d_A);
thrust::device_ptr<float> dp_U(d_U);
// don't know if this initialization is compulsory.
thrust::fill(dp_X1, dp_X1 + size, fill_value);
thrust::fill(dp_X2, dp_X2 + size, fill_value);
thrust::fill(dp_X3, dp_X3 + size, fill_value);
thrust::fill(dp_U, dp_U + size, fill_value);
// if necessary add SVD matrices initialization here.
// grid and block size
unsigned int block_size = size > gpu_info->MAX_BLOCK_SIZE ? gpu_info->MAX_BLOCK_SIZE : size;
unsigned long int n_blocks = (int) (size+block_size-1)/block_size;
if(n_blocks > gpu_info->MAX_GRID_SIZE) n_blocks = gpu_info->MAX_GRID_SIZE;
printf("Block size %f b_blocks %f\n", block_size, n_blocks);
unsigned int stride = block_size*n_blocks;
printf("nblcoks = %d, block_size = %d, size = %d, stride = %d\n", n_blocks, block_size, size, stride);
printf("BregmanADMM for mass transportation is running ...\n");
cublasInit();
float iter_obj;
int iter, count = 0;
// GPU time
float milliseconds = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for ( iter = 0; iter < badmm_para->MAX_ITER; iter++ )
{
// update B here as in admm boyd - B and U are the same - we'll use U for both
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(dp_X1, dp_X2, dp_X3, dp_A, dp_U)),
thrust::make_zip_iterator(thrust::make_tuple(dp_X1+size, dp_X2+size, dp_X3+size, dp_A+size, dp_U+size)),
dp_U,
B_update(N)
);
// X_1 update
thrust::transform(
dp_X1, dp_X1 + size, dp_U, dp_X1, X1_update(badmm_para->lambda)
);
// X_2 update
float temp = badmm_para->lambda * badmm_mt->g2;
thrust::transform(
dp_X2, dp_X2 + size, dp_U, dp_X2, X2_update(temp)
);
// X_3 update
// perform X_3 - B and store in X_3
thrust::transform(
dp_X3, dp_X3 + size, dp_U, dp_X3, thrust::minus<float>()
);
// svd code coming in
// --- CUDA solver initialization
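// Note: devInfo, the solver handle and the `work` buffer below are recreated
// on every iteration, and devInfo/work are never freed, which leaks device
// memory; creating them once before the loop and freeing them afterwards
// would avoid this.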
int *devInfo;
//can do a gpuErrchk on all the cuda Mallocs
cudaMalloc(&devInfo, sizeof(int));
cusolverStatus_t stat;
cusolverDnHandle_t solver_handle;
cusolverDnCreate(&solver_handle);
int work_size = 0;
stat = cusolverDnSgesvd_bufferSize(solver_handle, m, n, &work_size);
if(stat != CUSOLVER_STATUS_SUCCESS ) std::cout << "Initialization of cuSolver failed. \n";
float *work;
gpuErrchk(cudaMalloc(&work, work_size * sizeof(float)));
// --- CUDA SVD execution
stat = cusolverDnSgesvd(solver_handle, 'A', 'A', m, n, d_X_3, m, d_svd_S, d_svd_U, m, d_svd_VH, n, work, work_size, NULL, devInfo);
cudaDeviceSynchronize();
int devInfo_h = 0;
gpuErrchk(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
// std::cout << "devInfo = " << devInfo_h << "\n";
switch(stat){
case CUSOLVER_STATUS_SUCCESS: std::cout << "SVD computation success\n"; break;
case CUSOLVER_STATUS_NOT_INITIALIZED: std::cout << "Library cuSolver not initialized correctly\n"; break;
case CUSOLVER_STATUS_INVALID_VALUE: std::cout << "Invalid parameters passed\n"; break;
case CUSOLVER_STATUS_INTERNAL_ERROR: std::cout << "Internal operation failed\n"; break;
}
// if (devInfo_h == 0 && stat == CUSOLVER_STATUS_SUCCESS) std::cout << "SVD successful\n\n";
// --- Moving the results from device to host
// cudaMemcpy(h_S, d_S, N * sizeof(float), cudaMemcpyDeviceToHost);
// for(int i = 0; i < N; i++) std::cout << "d_S["<<i<<"] = " << h_S[i] << std::endl;
cusolverDnDestroy(solver_handle);
// X3 update here
// first calculate the prox_l1 // declare B as NULL here.
// reusing prox_l1 here
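// The intended X3 step (see the non-compiled sketch below) is singular-value
// thresholding: soft-threshold the singular values in d_svd_S by lambda*g3
// (reusing the X2/prox_l1 style update) and reconstruct
// X3 = U * diag(S) * VH from d_svd_U, d_svd_S and d_svd_VH.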
/* DO NOT DELETE - THIS IS NOT OLD CODE - JUST NEW CODE NOT READY TO RUN YET
// **************************************************************
// ANYTHING NOT READY TO COMPILE IS BELOW; EVERYTHING ABOVE SHOULD COMPILE AND RUN
// KEEP MOVING THIS START OF COMMENT TO TEST MORE STUFF FOR COMPILATION
X2_update<<<n_blocks, block_size>>>(d_svd_S, NULL, badmm_para->lambda * badmm_para->g3, size );
// TODO- this has to be updated
X3_update<<<n_blocks, block_size>>>(d_X_3, d_svd_U, d_svd_S, d_svd_VH, size);
// Concat all the X_i's to X for termination checks
concat_X<<<n_blocks, block_size>>>(d_X, d_X_1, d_X_2, d_X_3, badmm_mt->N, size);
cublasScopy(size, d_z_old, 1, d_z, 1);
// % diagnostics, reporting, termination checks
// matlab code continue here.
// // matric vector multiplication, probably not required.
// cublasSgemv( 'T',n,m, 1.0,d_X,n,col_ones,1, 0,d_rowSum,1); // fortran, column-major
// if (badmm_mt->a)
// rowNorm_a<<<n_blocks,block_size>>>(d_X, d_rowSum, d_a, size, n);
// else
// rowNorm<<<n_blocks,block_size>>>(d_X, d_rowSum, size, n);
// Z update
// this line also uses average of the three Xis,
// change this to cuda code
// z = x + repmat(-avg(X_1, X_2, X_3) + A./N, 1, N);
//zexp<<<n_blocks,block_size>>>( d_Z, d_X, d_Y, size);
// U - update
cublasScopy(size, d_U, 1, d_B, 1);
// matrix vector multiplication
cublasSgemv('N',n,m, 1.0,d_Z,n,row_ones,1, 0.0,d_colSum,1);
if (badmm_mt->b)
colNorm_b<<<n_blocks,block_size>>>(d_Z, d_colSum, d_b, size, n);
else
colNorm<<<n_blocks,block_size>>>(d_Z, d_colSum, size, n);
// dual update
dual<<<n_blocks,block_size>>>( d_Yerr, d_Y, d_X, d_Z, size);
// check stopping conditions
dev_ptr = thrust::device_pointer_cast(d_X);
dev_ptr1 = thrust::device_pointer_cast(d_Xold);
Xerr = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(dev_ptr, dev_ptr1)), thrust::make_zip_iterator(thrust::make_tuple(dev_ptr+size, dev_ptr1+size)), zdiffsq(), 0.0f, thrust::plus<float>());
dev_ptr = thrust::device_pointer_cast(d_X);
// for relative err condition
// iternorm = thrust::inner_product(dev_ptr, dev_ptr+size, dev_ptr, 0.0f);
// Xerr = sqrt(Xerr/iternorm);
dev_ptr = thrust::device_pointer_cast(d_Yerr);
Yerr = thrust::reduce(dev_ptr, dev_ptr+stride);
dev_ptr = thrust::device_pointer_cast(d_Y);
// for relative err condition
// iternorm = thrust::inner_product(dev_ptr, dev_ptr+size, dev_ptr, 0.0f);
// Yerr = sqrt(Yerr/iternorm);
if ( Yerr < badmmpara->tol && Xerr < badmmpara->tol ) {
break;
}
if( badmm_mt->print_step && !((iter+1)%badmm_mt->print_step) )
{
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
// calculate primal objective value
dev_ptr = thrust::device_pointer_cast(d_Z);
iter_obj = thrust::inner_product(d_Cptr, d_Cptr+size, dev_ptr, 0.0f);
badmmpara->iter_time[count] = milliseconds;
badmmpara->iter_err[count] = Xerr + Yerr;
badmmpara->iter_obj[count] = iter_obj * (-badmmpara->rho);
count++;
printf("iter = %d, objval = %f, primal_err = %f, dual_err = %f, time = %f\n", iter, iter_obj * (-badmmpara->rho), Xerr, Yerr, milliseconds);
}
// *******************************************************
// NON COMPILED BLOCK ENDS HERE
*/
}
// calculate primal objective value
// dev_ptr = thrust::device_pointer_cast(d_Z);
// iter_obj = thrust::inner_product(d_Cptr, d_Cptr+size, dev_ptr, 0.0f);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
// average X+Z
// cublasSaxpy (size, 1, d_Z, 1, d_X, 1);
// cublasSscal( size, 0.5, d_X, 1);
/*
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaMemcpyAsync(X, d_X, sizeof(float)*size, cudaMemcpyDeviceToHost,stream);
badmmpara->iter_err[count] = Xerr + Yerr;
badmmpara->iter_time[count] = milliseconds;
badmmpara->iter_obj[count] = iter_obj * (-badmmpara->rho);
printf("iter = %d, objval = %f, Xerr = %f, Yerr = %f, milliseconds:%f\n", iter, iter_obj * (-badmmpara->rho), Xerr, Yerr, milliseconds);
if (badmm_mt->SAVEFILE)
{
char filename[40];
FILE *f;
sprintf(filename, "X_out.dat");
f = fopen(filename, "wb");
fwrite (X,sizeof(float),size,f);
fclose(f);
}
cudaFree(d_X);
delete[]X;
*/
cudaDeviceReset();
}
int main(const int argc, const char **argv)
{
BADMM_massTrans* badmm_mt = NULL;
badmm_mt = (struct BADMM_massTrans *) malloc( sizeof(struct BADMM_massTrans) );
badmm_mt->print_step = 0; // default: not print
badmm_mt->SAVEFILE = 1; // default: save
// we'll call it A
badmm_mt->A = NULL;
badmm_mt->a = NULL;
badmm_mt->b = NULL;
long size;
int Asize[2];
unsigned int dim = 0; // initialized so the filename below is well-defined if no argument is given
// dim = 1;
// dim = 5;
// dim = 10;
// dim = 15;
char* str;
if ( argc > 1 ) dim = strtol(argv[1],&str,10);
// dim = dim*1024;
// read file
char filename[40];
FILE *f;
// read A
sprintf(filename, "%dC.dat",dim);
printf("%s", filename);
f = fopen ( filename , "rb" );
if ( f == NULL ) {
printf("Error! Can not find C file!");
return 0;
}
fread(Asize,sizeof(int),2, f);
badmm_mt->m = Asize[0];
badmm_mt->n = Asize[1];
badmm_mt->N = 3;
size = badmm_mt->m*badmm_mt->n;
badmm_mt->A = new float[size];
fread (badmm_mt->A,sizeof(float),size,f);
fclose(f);
printf("Cost Matrix C: rows = %d, cols = %d, total size = %d\n", badmm_mt->m, badmm_mt->n, size);
// DONT NEED FOR RPCA
// DELETE ALL
// read a
sprintf(filename, "%da.dat",dim);
f = fopen ( filename , "rb" );
if ( f != NULL )
{
badmm_mt->a = new float[badmm_mt->m];
fread (badmm_mt->a,sizeof(float),badmm_mt->m,f);
fclose(f);
}
// read b
sprintf(filename, "%db.dat",dim);
f = fopen ( filename , "rb" );
if ( f != NULL )
{
badmm_mt->b = new float[badmm_mt->n];
fread (badmm_mt->b,sizeof(float),badmm_mt->n,f);
fclose(f);
}
// UNTIL HERE
int iter_size;
ADMM_para* badmm_para = NULL;
badmm_para = (struct ADMM_para *) malloc( sizeof(struct ADMM_para) );
// default value
badmm_para->lambda = 1;
badmm_para->rho = 1.0 / badmm_para->lambda;
badmm_para->MAX_ITER = 100;
badmm_para->tol = 1e-4;
badmm_para->abstol = 1e-4;
badmm_para->reltol = 1e-2;
if ( argc > 2 ) badmm_para->rho = strtod(argv[2],&str);
if ( argc > 3 ) badmm_para->MAX_ITER = strtol(argv[3],&str,10);
if ( argc > 4 ) badmm_para->tol = strtod(argv[4],&str);
if ( argc > 5 ) badmm_mt->print_step = strtol(argv[5],&str,10);
if ( argc > 6 ) badmm_mt->SAVEFILE = strtol(argv[6],&str,10);
if ( badmm_para->rho == 0.0 ) badmm_para->rho = 0.001;
if ( badmm_para->MAX_ITER == 0 ) badmm_para->MAX_ITER = 2000;
if ( badmm_para->tol == 0.0 ) badmm_para->tol = 1e-4;
iter_size = 1;
if(badmm_mt->print_step)
{
iter_size = (int)badmm_para->MAX_ITER/badmm_mt->print_step + 1;
}
badmm_para->iter_obj = new float[iter_size];
badmm_para->iter_time = new float[iter_size];
badmm_para->iter_err = new float[iter_size];
printf("Please be patient! Getting GPU information is slow .....\n");
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0); // default device
GPUInfo gpu_info;
gpu_info.MAX_GRID_SIZE = prop.maxGridSize[0];
gpu_info.MAX_BLOCK_SIZE = prop.maxThreadsPerBlock;
// if out of GPU memory, return
float mem = (size*5*4+(badmm_mt->m+badmm_mt->n)*3*4+gpu_info.MAX_GRID_SIZE*gpu_info.MAX_BLOCK_SIZE*2*4)/pow(2,30);
float GPUmem = (long)prop.totalGlobalMem/pow(2,30);
printf("gridDim = %d, blockDim = %d, memory required = %fGB, GPU memory = %fGB\n", gpu_info.MAX_GRID_SIZE, gpu_info.MAX_BLOCK_SIZE, mem, GPUmem );
if ( GPUmem < mem )
{
printf("Not enough memory on GPU to solve the problem !\n");
return 0;
}
printf("rho = %f, Max_Iteration = %d, tol = %f, print every %d steps, save result: %d\n", badmm_para->rho, badmm_para->MAX_ITER, badmm_para->tol, badmm_mt->print_step, badmm_mt->SAVEFILE);
gpuBADMM_MT( badmm_mt, badmm_para, &gpu_info);
delete[]badmm_para->iter_err;
delete[]badmm_para->iter_obj;
delete[]badmm_para->iter_time;
free(badmm_para);
if(badmm_mt->A)delete[]badmm_mt->A;
if(badmm_mt->a)delete[]badmm_mt->a;
if(badmm_mt->b)delete[]badmm_mt->b;
free(badmm_mt);
}
void matInit(float* &X, unsigned int size, float value)
{
for ( int i = 0 ; i < size ; i++ )
X[i] = value;
}
|
eea9b8bffa6bdf08e4cd1d672d0cc8d4a820718e.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright Naoki Shibata and contributors 2010 - 2020.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <inttypes.h>
#include <hip/hip_runtime.h>
#include "sleefquadinline_cuda.h"
#define STDIN_FILENO 0
//
static int startsWith(const char *str, const char *prefix) {
while(*prefix != '\0') if (*str++ != *prefix++) return 0;
return *prefix == '\0';
}
static double u2d(uint64_t u) {
union {
double f;
uint64_t i;
} tmp;
tmp.i = u;
return tmp.f;
}
static uint64_t d2u(double d) {
union {
double f;
uint64_t i;
} tmp;
tmp.f = d;
return tmp.i;
}
//
__global__ void xaddq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_addq1_u05cuda(*a0, *a1); }
__global__ void xsubq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_subq1_u05cuda(*a0, *a1); }
__global__ void xmulq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_mulq1_u05cuda(*a0, *a1); }
__global__ void xdivq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_divq1_u05cuda(*a0, *a1); }
__global__ void xnegq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_negq1_cuda(*a0); }
__global__ void xicmpltq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpltq1_cuda(*a0, *a1); }
__global__ void xicmpgtq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgtq1_cuda(*a0, *a1); }
__global__ void xicmpleq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpleq1_cuda(*a0, *a1); }
__global__ void xicmpgeq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgeq1_cuda(*a0, *a1); }
__global__ void xicmpeqq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpeqq1_cuda(*a0, *a1); }
__global__ void xicmpneq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpneq1_cuda(*a0, *a1); }
__global__ void xicmpq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpq1_cuda(*a0, *a1); }
__global__ void xiunordq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_iunordq1_cuda(*a0, *a1); }
__global__ void xcast_from_doubleq(Sleef_quadx1 *r0, double *d0) { *r0 = Sleef_cast_from_doubleq1_cuda(*d0); }
__global__ void xcast_to_doubleq(double *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_doubleq1_cuda(*a0); }
__global__ void xcast_from_int64q(Sleef_quadx1 *r0, int64_t *i0) { *r0 = Sleef_cast_from_int64q1_cuda(*i0); }
__global__ void xcast_to_int64q(int64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_int64q1_cuda(*a0); }
__global__ void xcast_from_uint64q(Sleef_quadx1 *r0, uint64_t *u0) { *r0 = Sleef_cast_from_uint64q1_cuda(*u0); }
__global__ void xcast_to_uint64q(uint64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_uint64q1_cuda(*a0); }
__global__ void xsqrtq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sqrtq1_u05cuda(*a0); }
__global__ void xcbrtq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cbrtq1_u10cuda(*a0); }
__global__ void xsinq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinq1_u10cuda(*a0); }
__global__ void xcosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cosq1_u10cuda(*a0); }
__global__ void xtanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanq1_u10cuda(*a0); }
__global__ void xasinq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_asinq1_u10cuda(*a0); }
__global__ void xacosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acosq1_u10cuda(*a0); }
__global__ void xatanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanq1_u10cuda(*a0); }
__global__ void xatan2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_atan2q1_u10cuda(*a0, *a1); }
__global__ void xexpq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expq1_u10cuda(*a0); }
__global__ void xexp2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp2q1_u10cuda(*a0); }
__global__ void xexp10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp10q1_u10cuda(*a0); }
__global__ void xexpm1q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expm1q1_u10cuda(*a0); }
__global__ void xlogq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_logq1_u10cuda(*a0); }
__global__ void xlog2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log2q1_u10cuda(*a0); }
__global__ void xlog10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log10q1_u10cuda(*a0); }
__global__ void xlog1pq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log1pq1_u10cuda(*a0); }
__global__ void xpowq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_powq1_u10cuda(*a0, *a1); }
__global__ void xsinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinhq1_u10cuda(*a0); }
__global__ void xcoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_coshq1_u10cuda(*a0); }
__global__ void xtanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanhq1_u10cuda(*a0); }
__global__ void xasinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_asinhq1_u10cuda(*a0); }
__global__ void xacoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acoshq1_u10cuda(*a0); }
__global__ void xatanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanhq1_u10cuda(*a0); }
__global__ void xfabsq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_fabsq1_cuda(*a0); }
__global__ void xcopysignq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_copysignq1_cuda(*a0, *a1); }
__global__ void xfmaxq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmaxq1_cuda(*a0, *a1); }
__global__ void xfminq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fminq1_cuda(*a0, *a1); }
__global__ void xfdimq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fdimq1_u05cuda(*a0, *a1); }
__global__ void xfmodq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmodq1_cuda(*a0, *a1); }
__global__ void xremainderq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_remainderq1_cuda(*a0, *a1); }
__global__ void xfrexpq(Sleef_quadx1 *r, Sleef_quadx1 *a0, int *i0) { *r = Sleef_frexpq1_cuda(*a0, i0); }
__global__ void xmodfq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_modfq1_cuda(*a0, a1); }
__global__ void xtruncq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_truncq1_cuda(*a0); }
__global__ void xfloorq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_floorq1_cuda(*a0); }
__global__ void xceilq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_ceilq1_cuda(*a0); }
__global__ void xroundq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_roundq1_cuda(*a0); }
__global__ void xrintq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_rintq1_cuda(*a0); }
//
typedef union {
Sleef_quad q;
struct {
uint64_t l, h;
};
} cnv128;
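// cnv128 overlays a Sleef_quad with two 64-bit halves (h = upper, l = lower)
// so the macros below can exchange operands and results with the test driver
// as "high:low" hexadecimal pairs over stdin/stdout.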
#define BUFSIZE 1024
#define func_q_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0); \
hipDeviceSynchronize(); \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
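// Every func_* macro below follows the same shape: keep consuming stdin lines
// that begin with funcStr, unpack the hex operands into managed memory, launch
// the matching single-thread kernel, synchronize, and echo the result in hex.
// Clearing `sentinel` on each match lets main() stop once two consecutive
// passes over all macros recognize no command.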
#define func_q_q_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0, c1; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
*a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0, a1); \
hipDeviceSynchronize(); \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_i_q_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0, c1; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
*a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, i0, a0, a1); \
hipDeviceSynchronize(); \
printf("%d\n", *i0); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_d_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, d0, a0); \
hipDeviceSynchronize(); \
printf("%" PRIx64 "\n", d2u(*d0)); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_d(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
uint64_t u; \
sscanf(buf, funcStr " %" PRIx64, &u); \
*d0 = u2d(u); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, d0); \
hipDeviceSynchronize(); \
cnv128 c0; \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_i64_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, i64, a0); \
hipDeviceSynchronize(); \
printf("%" PRIx64 "\n", *i64); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_i64(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
sscanf(buf, funcStr " %" PRIx64, i64); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, i64); \
hipDeviceSynchronize(); \
cnv128 c0; \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_u64_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, u64, a0); \
hipDeviceSynchronize(); \
printf("%" PRIx64 "\n", *u64); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_u64(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
sscanf(buf, funcStr " %" PRIx64, u64); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, u64); \
hipDeviceSynchronize(); \
cnv128 c0; \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_q_pi(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0, i0); \
hipDeviceSynchronize(); \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 " %d\n", c0.h, c0.l, *i0); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_q_pq(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0, c1; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
hipLaunchKernelGGL(( funcName), dim3(1), dim3(1), 0, 0, r, a0, a1); \
hipDeviceSynchronize(); \
c0.q = Sleef_getq1_cuda(*r, 0); \
c1.q = Sleef_getq1_cuda(*a1, 0); \
printf("%" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l, c1.h, c1.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
int main(int argc, char **argv) {
#if 0
hipInit(0);
int ndevice;
hipGetDeviceCount(&ndevice);
if (ndevice == 0) {
fprintf(stderr, "No cuda device available\n");
exit(0);
}
hipDevice_t device;
char deviceName[1024];
hipDeviceGet(&device, 0);
hipDeviceGetName(deviceName, 1000, device);
fprintf(stderr, "Device : %s\n", deviceName);
#endif
hipSetDeviceFlags(hipDeviceScheduleSpin);
Sleef_quadx1 *r, *a0, *a1;
double *d0;
int *i0;
int64_t *i64;
uint64_t *u64;
hipMallocManaged(&r , 1*sizeof(Sleef_quadx1));
hipMallocManaged(&a0, 1*sizeof(Sleef_quadx1));
hipMallocManaged(&a1, 1*sizeof(Sleef_quadx1));
hipMallocManaged(&d0, 1*sizeof(double));
hipMallocManaged(&i0, 1*sizeof(int));
hipMallocManaged(&i64, 1*sizeof(int64_t));
hipMallocManaged(&u64, 1*sizeof(uint64_t));
//
printf("1\n");
fflush(stdout);
char buf[BUFSIZE];
fgets(buf, BUFSIZE-1, stdin);
int sentinel = 0;
while(!feof(stdin) && sentinel < 2) {
func_q_q_q("addq_u05", xaddq_u05);
func_q_q_q("subq_u05", xsubq_u05);
func_q_q_q("mulq_u05", xmulq_u05);
func_q_q_q("divq_u05", xdivq_u05);
func_q_q("sqrtq_u05", xsqrtq_u05);
func_q_q("cbrtq_u10", xcbrtq_u10);
func_q_q("sinq_u10", xsinq_u10);
func_q_q("cosq_u10", xcosq_u10);
func_q_q("tanq_u10", xtanq_u10);
func_q_q("asinq_u10", xasinq_u10);
func_q_q("acosq_u10", xacosq_u10);
func_q_q("atanq_u10", xatanq_u10);
func_q_q_q("atan2q_u10", xatan2q_u10);
func_q_q("expq_u10", xexpq_u10);
func_q_q("exp2q_u10", xexp2q_u10);
func_q_q("exp10q_u10", xexp10q_u10);
func_q_q("expm1q_u10", xexpm1q_u10);
func_q_q("logq_u10", xlogq_u10);
func_q_q("log2q_u10", xlog2q_u10);
func_q_q("log10q_u10", xlog10q_u10);
func_q_q("log1pq_u10", xlog1pq_u10);
func_q_q_q("powq_u10", xpowq_u10);
func_q_q("sinhq_u10", xsinhq_u10);
func_q_q("coshq_u10", xcoshq_u10);
func_q_q("tanhq_u10", xtanhq_u10);
func_q_q("asinhq_u10", xasinhq_u10);
func_q_q("acoshq_u10", xacoshq_u10);
func_q_q("atanhq_u10", xatanhq_u10);
func_q_q("negq", xnegq);
func_q_q("fabsq", xfabsq);
func_q_q_q("copysignq", xcopysignq);
func_q_q_q("fmaxq", xfmaxq);
func_q_q_q("fminq", xfminq);
func_q_q_q("fdimq_u05", xfdimq_u05);
func_q_q_q("fmodq", xfmodq);
func_q_q_q("remainderq", xremainderq);
func_q_q_pi("frexpq", xfrexpq);
func_q_q_pq("modfq", xmodfq);
func_q_q("truncq", xtruncq);
func_q_q("floorq", xfloorq);
func_q_q("ceilq", xceilq);
func_q_q("roundq", xroundq);
func_q_q("rintq", xrintq);
func_q_d("cast_from_doubleq", xcast_from_doubleq);
func_d_q("cast_to_doubleq", xcast_to_doubleq);
func_q_i64("cast_from_int64q", xcast_from_int64q);
func_i64_q("cast_to_int64q", xcast_to_int64q);
func_q_u64("cast_from_uint64q", xcast_from_uint64q);
func_u64_q("cast_to_uint64q", xcast_to_uint64q);
func_i_q_q("icmpltq", xicmpltq);
func_i_q_q("icmpgtq", xicmpgtq);
func_i_q_q("icmpleq", xicmpleq);
func_i_q_q("icmpgeq", xicmpgeq);
func_i_q_q("icmpeqq", xicmpeqq);
func_i_q_q("icmpneq", xicmpneq);
func_i_q_q("icmpq", xicmpq);
func_i_q_q("iunordq", xiunordq);
sentinel++;
}
//
return 0;
}
|
eea9b8bffa6bdf08e4cd1d672d0cc8d4a820718e.cu
|
// Copyright Naoki Shibata and contributors 2010 - 2020.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <inttypes.h>
#include <cuda.h>
#include "sleefquadinline_cuda.h"
#define STDIN_FILENO 0
//
static int startsWith(const char *str, const char *prefix) {
while(*prefix != '\0') if (*str++ != *prefix++) return 0;
return *prefix == '\0';
}
static double u2d(uint64_t u) {
union {
double f;
uint64_t i;
} tmp;
tmp.i = u;
return tmp.f;
}
static uint64_t d2u(double d) {
union {
double f;
uint64_t i;
} tmp;
tmp.f = d;
return tmp.i;
}
//
__global__ void xaddq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_addq1_u05cuda(*a0, *a1); }
__global__ void xsubq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_subq1_u05cuda(*a0, *a1); }
__global__ void xmulq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_mulq1_u05cuda(*a0, *a1); }
__global__ void xdivq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_divq1_u05cuda(*a0, *a1); }
__global__ void xnegq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_negq1_cuda(*a0); }
__global__ void xicmpltq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpltq1_cuda(*a0, *a1); }
__global__ void xicmpgtq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgtq1_cuda(*a0, *a1); }
__global__ void xicmpleq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpleq1_cuda(*a0, *a1); }
__global__ void xicmpgeq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgeq1_cuda(*a0, *a1); }
__global__ void xicmpeqq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpeqq1_cuda(*a0, *a1); }
__global__ void xicmpneq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpneq1_cuda(*a0, *a1); }
__global__ void xicmpq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpq1_cuda(*a0, *a1); }
__global__ void xiunordq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_iunordq1_cuda(*a0, *a1); }
__global__ void xcast_from_doubleq(Sleef_quadx1 *r0, double *d0) { *r0 = Sleef_cast_from_doubleq1_cuda(*d0); }
__global__ void xcast_to_doubleq(double *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_doubleq1_cuda(*a0); }
__global__ void xcast_from_int64q(Sleef_quadx1 *r0, int64_t *i0) { *r0 = Sleef_cast_from_int64q1_cuda(*i0); }
__global__ void xcast_to_int64q(int64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_int64q1_cuda(*a0); }
__global__ void xcast_from_uint64q(Sleef_quadx1 *r0, uint64_t *u0) { *r0 = Sleef_cast_from_uint64q1_cuda(*u0); }
__global__ void xcast_to_uint64q(uint64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_uint64q1_cuda(*a0); }
__global__ void xsqrtq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sqrtq1_u05cuda(*a0); }
__global__ void xcbrtq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cbrtq1_u10cuda(*a0); }
__global__ void xsinq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinq1_u10cuda(*a0); }
__global__ void xcosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cosq1_u10cuda(*a0); }
__global__ void xtanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanq1_u10cuda(*a0); }
__global__ void xasinq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_asinq1_u10cuda(*a0); }
__global__ void xacosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acosq1_u10cuda(*a0); }
__global__ void xatanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanq1_u10cuda(*a0); }
__global__ void xatan2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_atan2q1_u10cuda(*a0, *a1); }
__global__ void xexpq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expq1_u10cuda(*a0); }
__global__ void xexp2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp2q1_u10cuda(*a0); }
__global__ void xexp10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp10q1_u10cuda(*a0); }
__global__ void xexpm1q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expm1q1_u10cuda(*a0); }
__global__ void xlogq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_logq1_u10cuda(*a0); }
__global__ void xlog2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log2q1_u10cuda(*a0); }
__global__ void xlog10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log10q1_u10cuda(*a0); }
__global__ void xlog1pq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log1pq1_u10cuda(*a0); }
__global__ void xpowq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_powq1_u10cuda(*a0, *a1); }
__global__ void xsinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinhq1_u10cuda(*a0); }
__global__ void xcoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_coshq1_u10cuda(*a0); }
__global__ void xtanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanhq1_u10cuda(*a0); }
__global__ void xasinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_asinhq1_u10cuda(*a0); }
__global__ void xacoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acoshq1_u10cuda(*a0); }
__global__ void xatanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanhq1_u10cuda(*a0); }
__global__ void xfabsq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_fabsq1_cuda(*a0); }
__global__ void xcopysignq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_copysignq1_cuda(*a0, *a1); }
__global__ void xfmaxq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmaxq1_cuda(*a0, *a1); }
__global__ void xfminq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fminq1_cuda(*a0, *a1); }
__global__ void xfdimq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fdimq1_u05cuda(*a0, *a1); }
__global__ void xfmodq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmodq1_cuda(*a0, *a1); }
__global__ void xremainderq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_remainderq1_cuda(*a0, *a1); }
__global__ void xfrexpq(Sleef_quadx1 *r, Sleef_quadx1 *a0, int *i0) { *r = Sleef_frexpq1_cuda(*a0, i0); }
__global__ void xmodfq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_modfq1_cuda(*a0, a1); }
__global__ void xtruncq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_truncq1_cuda(*a0); }
__global__ void xfloorq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_floorq1_cuda(*a0); }
__global__ void xceilq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_ceilq1_cuda(*a0); }
__global__ void xroundq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_roundq1_cuda(*a0); }
__global__ void xrintq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_rintq1_cuda(*a0); }
//
typedef union {
Sleef_quad q;
struct {
uint64_t l, h;
};
} cnv128;
#define BUFSIZE 1024
#define func_q_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
funcName<<<1, 1>>>(r, a0); \
cudaDeviceSynchronize(); \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_q_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0, c1; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
*a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \
funcName<<<1, 1>>>(r, a0, a1); \
cudaDeviceSynchronize(); \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_i_q_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0, c1; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
*a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \
funcName<<<1, 1>>>(i0, a0, a1); \
cudaDeviceSynchronize(); \
printf("%d\n", *i0); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_d_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
funcName<<<1, 1>>>(d0, a0); \
cudaDeviceSynchronize(); \
printf("%" PRIx64 "\n", d2u(*d0)); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_d(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
uint64_t u; \
sscanf(buf, funcStr " %" PRIx64, &u); \
*d0 = u2d(u); \
funcName<<<1, 1>>>(r, d0); \
cudaDeviceSynchronize(); \
cnv128 c0; \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_i64_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
funcName<<<1, 1>>>(i64, a0); \
cudaDeviceSynchronize(); \
printf("%" PRIx64 "\n", *i64); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_i64(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
sscanf(buf, funcStr " %" PRIx64, i64); \
funcName<<<1, 1>>>(r, i64); \
cudaDeviceSynchronize(); \
cnv128 c0; \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_u64_q(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
funcName<<<1, 1>>>(u64, a0); \
cudaDeviceSynchronize(); \
printf("%" PRIx64 "\n", *u64); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_u64(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
sscanf(buf, funcStr " %" PRIx64, u64); \
funcName<<<1, 1>>>(r, u64); \
cudaDeviceSynchronize(); \
cnv128 c0; \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_q_pi(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
funcName<<<1, 1>>>(r, a0, i0); \
cudaDeviceSynchronize(); \
c0.q = Sleef_getq1_cuda(*r, 0); \
printf("%" PRIx64 ":%" PRIx64 " %d\n", c0.h, c0.l, *i0); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
#define func_q_q_pq(funcStr, funcName) { \
while (startsWith(buf, funcStr " ")) { \
sentinel = 0; \
cnv128 c0, c1; \
sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \
*a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \
funcName<<<1, 1>>>(r, a0, a1); \
cudaDeviceSynchronize(); \
c0.q = Sleef_getq1_cuda(*r, 0); \
c1.q = Sleef_getq1_cuda(*a1, 0); \
printf("%" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l, c1.h, c1.l); \
fflush(stdout); \
if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \
} \
}
int main(int argc, char **argv) {
#if 0
cuInit(0);
int ndevice;
cuDeviceGetCount(&ndevice);
if (ndevice == 0) {
fprintf(stderr, "No cuda device available\n");
exit(0);
}
CUdevice device;
char deviceName[1024];
cuDeviceGet(&device, 0);
cuDeviceGetName(deviceName, 1000, device);
fprintf(stderr, "Device : %s\n", deviceName);
#endif
cudaSetDeviceFlags(cudaDeviceScheduleSpin);
Sleef_quadx1 *r, *a0, *a1;
double *d0;
int *i0;
int64_t *i64;
uint64_t *u64;
cudaMallocManaged(&r , 1*sizeof(Sleef_quadx1));
cudaMallocManaged(&a0, 1*sizeof(Sleef_quadx1));
cudaMallocManaged(&a1, 1*sizeof(Sleef_quadx1));
cudaMallocManaged(&d0, 1*sizeof(double));
cudaMallocManaged(&i0, 1*sizeof(int));
cudaMallocManaged(&i64, 1*sizeof(int64_t));
cudaMallocManaged(&u64, 1*sizeof(uint64_t));
//
printf("1\n");
fflush(stdout);
char buf[BUFSIZE];
fgets(buf, BUFSIZE-1, stdin);
int sentinel = 0;
while(!feof(stdin) && sentinel < 2) {
func_q_q_q("addq_u05", xaddq_u05);
func_q_q_q("subq_u05", xsubq_u05);
func_q_q_q("mulq_u05", xmulq_u05);
func_q_q_q("divq_u05", xdivq_u05);
func_q_q("sqrtq_u05", xsqrtq_u05);
func_q_q("cbrtq_u10", xcbrtq_u10);
func_q_q("sinq_u10", xsinq_u10);
func_q_q("cosq_u10", xcosq_u10);
func_q_q("tanq_u10", xtanq_u10);
func_q_q("asinq_u10", xasinq_u10);
func_q_q("acosq_u10", xacosq_u10);
func_q_q("atanq_u10", xatanq_u10);
func_q_q_q("atan2q_u10", xatan2q_u10);
func_q_q("expq_u10", xexpq_u10);
func_q_q("exp2q_u10", xexp2q_u10);
func_q_q("exp10q_u10", xexp10q_u10);
func_q_q("expm1q_u10", xexpm1q_u10);
func_q_q("logq_u10", xlogq_u10);
func_q_q("log2q_u10", xlog2q_u10);
func_q_q("log10q_u10", xlog10q_u10);
func_q_q("log1pq_u10", xlog1pq_u10);
func_q_q_q("powq_u10", xpowq_u10);
func_q_q("sinhq_u10", xsinhq_u10);
func_q_q("coshq_u10", xcoshq_u10);
func_q_q("tanhq_u10", xtanhq_u10);
func_q_q("asinhq_u10", xasinhq_u10);
func_q_q("acoshq_u10", xacoshq_u10);
func_q_q("atanhq_u10", xatanhq_u10);
func_q_q("negq", xnegq);
func_q_q("fabsq", xfabsq);
func_q_q_q("copysignq", xcopysignq);
func_q_q_q("fmaxq", xfmaxq);
func_q_q_q("fminq", xfminq);
func_q_q_q("fdimq_u05", xfdimq_u05);
func_q_q_q("fmodq", xfmodq);
func_q_q_q("remainderq", xremainderq);
func_q_q_pi("frexpq", xfrexpq);
func_q_q_pq("modfq", xmodfq);
func_q_q("truncq", xtruncq);
func_q_q("floorq", xfloorq);
func_q_q("ceilq", xceilq);
func_q_q("roundq", xroundq);
func_q_q("rintq", xrintq);
func_q_d("cast_from_doubleq", xcast_from_doubleq);
func_d_q("cast_to_doubleq", xcast_to_doubleq);
func_q_i64("cast_from_int64q", xcast_from_int64q);
func_i64_q("cast_to_int64q", xcast_to_int64q);
func_q_u64("cast_from_uint64q", xcast_from_uint64q);
func_u64_q("cast_to_uint64q", xcast_to_uint64q);
func_i_q_q("icmpltq", xicmpltq);
func_i_q_q("icmpgtq", xicmpgtq);
func_i_q_q("icmpleq", xicmpleq);
func_i_q_q("icmpgeq", xicmpgeq);
func_i_q_q("icmpeqq", xicmpeqq);
func_i_q_q("icmpneq", xicmpneq);
func_i_q_q("icmpq", xicmpq);
func_i_q_q("iunordq", xiunordq);
sentinel++;
}
//
return 0;
}
|
7599627f70b594a7ac33bf90cc76c4babfc05d22.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "wb.h"
#define TILE_WIDTH 16
__global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
__shared__ float sharedA[TILE_WIDTH][TILE_WIDTH];
__shared__ float sharedB[TILE_WIDTH][TILE_WIDTH];
unsigned int bx = blockIdx.x;
unsigned int by = blockIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int col = bx * TILE_WIDTH + tx;
unsigned int row = by * TILE_WIDTH + ty;
double sum = 0;
// Go through the phases, i is phase number.
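// Each phase copies one TILE_WIDTH x TILE_WIDTH tile of A and of B into shared
// memory; the ceiling division keeps a final partial tile in play, and the
// zero-fill below makes out-of-range elements contribute nothing to the sum.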
for (unsigned int i = 0; i < (TILE_WIDTH + numAColumns - 1) / TILE_WIDTH; i++) {
// Load values
if ((i * TILE_WIDTH + tx) < numAColumns && row < numCRows) {
sharedA[ty][tx] = A[row * numAColumns + (i * TILE_WIDTH + tx)];
} else {
sharedA[ty][tx] = 0.0;
}
if ((i * TILE_WIDTH + ty) < numBRows && col < numCColumns) {
sharedB[ty][tx] = B[(i * TILE_WIDTH + ty) * numBColumns + col];
} else {
sharedB[ty][tx] = 0.0;
}
__syncthreads();
if (row < numCRows && col < numCColumns) {
for (int k = 0; k < TILE_WIDTH; k++) {
sum += sharedA[ty][k] * sharedB[k][tx];
}
}
__syncthreads();
}
if (row < numCRows && col < numCColumns) {
C[row * numCColumns + col] = sum;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows;
int numCColumns;
args = wbArg_read(argc, argv);
#if LAB_DEBUG
std::cout << "Running Tiled Matrix Multiplicaion ..." << std::endl;
#endif
wbTime_start(Generic, "Importing data and creating memory on host");
hostA =
(float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB =
(float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
hostC = (float *) malloc(numARows * numBColumns * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
numCRows = numARows;
numCColumns = numBColumns;
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns);
wbTime_start(GPU, "Allocating GPU memory.");
hipMalloc((void **) &deviceA, numARows * numAColumns * sizeof(float));
hipMalloc((void **) &deviceB, numBRows * numBColumns * sizeof(float));
hipMalloc((void **) &deviceC, numCRows * numCColumns * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
hipMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float),
hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
unsigned int threads = TILE_WIDTH;
unsigned int blocksX = (numCColumns + threads - 1) / threads;
unsigned int blocksY = (numCRows + threads - 1) / threads;
dim3 blockSize(threads, threads, 1);
dim3 gridSize(blocksX, blocksY, 1);
wbLog(TRACE, "The block dimensions are ", blockSize.x, " x ", blockSize.y);
wbLog(TRACE, "The grid dimensions are ", gridSize.x, " x ", gridSize.y);
wbTime_start(Compute, "Performing CUDA computation");
hipLaunchKernelGGL(( matrixMultiplyShared), dim3(gridSize), dim3(blockSize), 0, 0,
deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float),
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
7599627f70b594a7ac33bf90cc76c4babfc05d22.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "wb.h"
#define TILE_WIDTH 16
__global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
__shared__ float sharedA[TILE_WIDTH][TILE_WIDTH];
__shared__ float sharedB[TILE_WIDTH][TILE_WIDTH];
unsigned int bx = blockIdx.x;
unsigned int by = blockIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int col = bx * TILE_WIDTH + tx;
unsigned int row = by * TILE_WIDTH + ty;
double sum = 0;
// Go through the phases, i is phase number.
for (unsigned int i = 0; i < (TILE_WIDTH + numAColumns - 1) / TILE_WIDTH; i++) {
// Load values
if ((i * TILE_WIDTH + tx) < numAColumns && row < numCRows) {
sharedA[ty][tx] = A[row * numAColumns + (i * TILE_WIDTH + tx)];
} else {
sharedA[ty][tx] = 0.0;
}
if ((i * TILE_WIDTH + ty) < numBRows && col < numCColumns) {
sharedB[ty][tx] = B[(i * TILE_WIDTH + ty) * numBColumns + col];
} else {
sharedB[ty][tx] = 0.0;
}
__syncthreads();
if (row < numCRows && col < numCColumns) {
for (int k = 0; k < TILE_WIDTH; k++) {
sum += sharedA[ty][k] * sharedB[k][tx];
}
}
__syncthreads();
}
if (row < numCRows && col < numCColumns) {
C[row * numCColumns + col] = sum;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows;
int numCColumns;
args = wbArg_read(argc, argv);
#if LAB_DEBUG
std::cout << "Running Tiled Matrix Multiplicaion ..." << std::endl;
#endif
wbTime_start(Generic, "Importing data and creating memory on host");
hostA =
(float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB =
(float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
hostC = (float *) malloc(numARows * numBColumns * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
numCRows = numARows;
numCColumns = numBColumns;
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns);
wbTime_start(GPU, "Allocating GPU memory.");
cudaMalloc((void **) &deviceA, numARows * numAColumns * sizeof(float));
cudaMalloc((void **) &deviceB, numBRows * numBColumns * sizeof(float));
cudaMalloc((void **) &deviceC, numCRows * numCColumns * sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
cudaMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float),
cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
unsigned int threads = TILE_WIDTH;
unsigned int blocksX = (numCColumns + threads - 1) / threads;
unsigned int blocksY = (numCRows + threads - 1) / threads;
dim3 blockSize(threads, threads, 1);
dim3 gridSize(blocksX, blocksY, 1);
wbLog(TRACE, "The block dimensions are ", blockSize.x, " x ", blockSize.y);
wbLog(TRACE, "The grid dimensions are ", gridSize.x, " x ", gridSize.y);
wbTime_start(Compute, "Performing CUDA computation");
matrixMultiplyShared<<<gridSize, blockSize>>>(
deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float),
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
cc34f5838d1fef90af9dc1793f2f0d7789ef7ba7.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <array>
#include "paddle/fluid/framework/conv_search_cache.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/kernels/funcs/padding.h"
DECLARE_int64(cudnn_exhaustive_search_times);
namespace paddle {
namespace operators {
#if PADDLE_WITH_HIP || CUDNN_VERSION >= 7100
using Tensor = phi::DenseTensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using ScopedActivationDescriptor = platform::ScopedActivationDescriptor;
using DataLayout = platform::DataLayout;
using framework::AlgorithmsCache;
using framework::ConvSearchCache;
using framework::SearchFuseResult;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
template <typename T>
class CUDNNConvFusionOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto* input = ctx.Input<phi::DenseTensor>("Input");
auto* filter = ctx.Input<phi::DenseTensor>("Filter");
auto* bias = ctx.Input<phi::DenseTensor>("Bias");
auto* residual = ctx.Input<phi::DenseTensor>("ResidualData");
auto* output = ctx.Output<phi::DenseTensor>("Output");
dev_ctx.template Alloc<T>(output, output->numel() * sizeof(T));
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
const std::string activation = ctx.Attr<std::string>("activation");
int groups = ctx.Attr<int>("groups");
int64_t user_workspace_size =
static_cast<size_t>(ctx.Attr<int>("workspace_size_MB"));
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
const T* filter_data = filter->data<T>();
const T* bias_data = bias->data<T>();
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
Tensor transformed_input_channel(input->dtype());
Tensor transformed_output(output->dtype());
transformed_input_channel = *input;
transformed_output = *output;
T* output_data = transformed_output.data<T>();
const T* residual_data = residual ? residual->data<T>() : output_data;
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
phi::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(phi::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
transformed_input =
ctx.AllocateTmpTensor<T, phi::GPUContext>(new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
phi::funcs::PadFunction<phi::GPUContext, T, 4>(
dev_ctx,
input_pad,
transformed_input_channel,
pad_value,
&transformed_input);
} break;
case 5: {
phi::funcs::PadFunction<phi::GPUContext, T, 5>(
dev_ctx,
input_pad,
transformed_input_channel,
pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::PermissionDenied(
"Operator Conv2DFusion expects Input to be a 4-D or 5-D Tensor. "
"But received the actual dimension = %d, shape = [%s].",
rank,
transformed_input_channel.dims()));
}
} else {
transformed_input = transformed_input_channel;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
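// From here on transformed_input holds either the original input (symmetric
// padding) or a pre-padded copy, and padding_common carries the symmetric
// padding passed to the convolution descriptor below.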
const T* input_data = transformed_input.data<T>();
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedFilterDescriptor filter_desc;
ScopedTensorDescriptor bias_desc;
ScopedConvolutionDescriptor conv_desc;
ScopedActivationDescriptor act_desc;
DataLayout layout = DataLayout::kNCHW;
if (input->dims().size() == 5) {
layout = DataLayout::kNCDHW;
}
#ifdef PADDLE_WITH_HIP
miopenConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenSetConvolutionGroupCount(cudnn_conv_desc,
groups));
// Now only support NCHW
std::vector<int> bias_dim = {
1, static_cast<int>(transformed_output.dims()[1]), 1, 1};
miopenTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_input.dims()));
miopenTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_output.dims()));
miopenTensorDescriptor_t cudnn_filter_desc =
filter_desc.descriptor<T>(layout, phi::vectorize<int>(filter->dims()));
miopenTensorDescriptor_t cudnn_bias_desc =
bias_desc.descriptor<T>(layout, bias_dim);
miopenActivationDescriptor_t cudnn_act_desc =
act_desc.descriptor<T>(activation);
miopenConvFwdAlgorithm_t algo;
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
auto x_dims = phi::vectorize(transformed_input.dims());
auto f_dims = phi::vectorize(filter->dims());
size_t workspace_size = 0;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenConvolutionForwardGetWorkSpaceSize(
handle,
cudnn_filter_desc,
cudnn_input_desc,
cudnn_conv_desc,
cudnn_output_desc,
&workspace_size));
int find_count;
miopenConvAlgoPerf_t find_result;
auto cudnn_find_func = [&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenFindConvolutionForwardAlgorithm(
handle,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
cudnn_output_desc,
output_data,
kNUM_CUDNN_FWD_ALGS,
&find_count,
&find_result,
cudnn_workspace_ptr,
workspace_size,
false));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size);
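// miopenFindConvolutionForwardAlgorithm benchmarks the candidate forward
// algorithms and reports them ranked by measured time (fastest first), so the
// top entry is taken as the algorithm to run.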
algo = find_result.fwd_algo;
VLOG(3) << "cuDNN forward algo " << algo;
{
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenConvolutionForward(handle,
&alpha,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
algo,
&beta,
cudnn_output_desc,
output_data,
cudnn_workspace,
workspace_size));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenConvolutionForwardBias(handle,
&alpha,
cudnn_bias_desc,
bias_data,
&beta,
cudnn_output_desc,
output_data));
if (activation != "identity") {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenActivationForward(handle,
cudnn_act_desc,
&alpha,
cudnn_output_desc,
output_data,
&beta,
cudnn_output_desc,
output_data));
}
if (residual) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenOpTensor(handle,
miopenTensorOpAdd,
&alpha,
cudnn_output_desc,
output_data,
&alpha,
cudnn_output_desc,
residual_data,
&beta,
cudnn_output_desc,
output_data));
}
}
#else // PADDLE_WITH_HIP
cudnnConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionGroupCount(
cudnn_conv_desc, groups));
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_input.dims()));
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_output.dims()));
cudnnFilterDescriptor_t cudnn_filter_desc =
filter_desc.descriptor<T>(layout, phi::vectorize<int>(filter->dims()));
// Now only support NCHW
std::vector<int> bias_dim = {
1, static_cast<int>(transformed_output.dims()[1]), 1, 1};
cudnnTensorDescriptor_t cudnn_bias_desc =
bias_desc.descriptor<T>(layout, bias_dim);
cudnnActivationDescriptor_t cudnn_act_desc =
act_desc.descriptor<T>(activation);
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size_in_bytes; // final workspace to allocate.
size_t workspace_size_limit = 0;
if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) {
int64_t max_user_size =
std::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit),
user_workspace_size);
workspace_size_limit = max_user_size * 1024 * 1024;
}
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo;
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
auto dtype = platform::CudnnDataType<T>::type;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType(
cudnn_conv_desc, CUDNN_DEFAULT_MATH));
if (dtype == CUDNN_DATA_HALF) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType(
cudnn_conv_desc, CUDNN_TENSOR_OP_MATH));
}
#if TORCH_HIP_VERSION >= 11000 && CUDNN_VERSION >= 8000
if (!platform::allow_tf32_cudnn) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType(
cudnn_conv_desc, CUDNN_FMA_MATH));
}
#endif // TORCH_HIP_VERSION >= 11000 && CUDNN_VERSION >= 8000
auto x_dims = phi::vectorize(transformed_input.dims());
auto f_dims = phi::vectorize(filter->dims());
if (!exhaustive_search) {
#if CUDNN_VERSION >= 8000
int perf_count;
int best_algo_idx = 0;
size_t tmp_size = 0;
std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results(
new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7(
handle,
cudnn_input_desc,
cudnn_filter_desc,
cudnn_conv_desc,
cudnn_output_desc,
kNUM_CUDNN_FWD_ALGS,
&perf_count,
perf_results.get()));
algo = (perf_results.get())[best_algo_idx].algo;
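// cudnnGetConvolutionForwardAlgorithm_v7 returns heuristic candidates ordered
// by expected performance; with best_algo_idx fixed at 0 the top-ranked
// algorithm is chosen without timing it (unlike the exhaustive branch below).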
#else
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm(
handle,
cudnn_input_desc,
cudnn_filter_desc,
cudnn_conv_desc,
cudnn_output_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit,
&algo));
#endif
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle,
cudnn_input_desc,
cudnn_filter_desc,
cudnn_conv_desc,
cudnn_output_desc,
algo,
&workspace_size_in_bytes));
if (workspace_size_in_bytes > workspace_size_limit)
workspace_size_limit = workspace_size_in_bytes;
VLOG(3) << "cuDNN forward algo " << algo;
} else {
std::function<SearchFuseResult<cudnnConvolutionFwdAlgo_t>()> search_func =
[&]() -> SearchFuseResult<cudnnConvolutionFwdAlgo_t> {
int returned_algo_count;
SearchFuseResult<cudnnConvolutionFwdAlgo_t> fwd_result;
std::array<cudnnConvolutionFwdAlgoPerf_t, kNUM_CUDNN_FWD_ALGS>
fwd_perf_stat;
auto cudnn_find_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnFindConvolutionForwardAlgorithmEx(
handle,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
cudnn_output_desc,
output_data,
kNUM_CUDNN_FWD_ALGS,
&returned_algo_count,
fwd_perf_stat.data(),
cudnn_workspace,
workspace_size_limit));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit);
VLOG(3) << "Perf result: (algo: stat, time, memory)";
for (int i = 0; i < returned_algo_count; ++i) {
const auto& stat = fwd_perf_stat[i];
VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time << " "
<< stat.memory;
}
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle,
cudnn_input_desc,
cudnn_filter_desc,
cudnn_conv_desc,
cudnn_output_desc,
fwd_perf_stat[0].algo,
&workspace_size_in_bytes));
// PADDLE_ENFORCE_LE(
// workspace_size_in_bytes,
// workspace_size_limit,
// platform::errors::InvalidArgument(
// "The actual workspace size to be allocated for cuDNN is
// expected " "to be less than the limit. But received: the
// actual workspace " "size = %d, limit = %d.",
// workspace_size_in_bytes,
// workspace_size_limit));
fwd_result.algo = fwd_perf_stat[0].algo;
fwd_result.workspace_size = workspace_size_in_bytes;
return fwd_result;
};
AlgorithmsCache<SearchFuseResult<cudnnConvolutionFwdAlgo_t>>& algo_cache =
*(framework::ConvSearchCache::Instance().GetConvFusion());
int search_times = ctx.Attr<int>("search_times");
SearchFuseResult<cudnnConvolutionFwdAlgo_t> algo_result;
search_times = std::max(
static_cast<int>(FLAGS_cudnn_exhaustive_search_times), search_times);
// TODO(dangqingqing): Unify this if-else.
if (search_times > 0) {
// The searched algo will be cached by `search_times` times for
// different input dimension. For other dimensions, select the algo
// of closest area.
algo_result = algo_cache.GetAlgorithm(
x_dims[2] * x_dims[3], search_times, 0, search_func);
algo = algo_result.algo;
workspace_size_in_bytes = algo_result.workspace_size;
} else {
algo_result = algo_cache.GetAlgorithm(x_dims,
f_dims,
strides,
paddings,
dilations,
0,
dtype,
search_func);
algo = algo_result.algo;
workspace_size_in_bytes = algo_result.workspace_size;
}
VLOG(3) << "choose algo " << algo;
}
if ((activation == "identity") && (!residual)) {
// Only the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM algo is
// enabled with CUDNN_ACTIVATION_IDENTITY in cuDNN lib.
// But test in some case, the speed is slower, change to use
// cudnnConvolutionForward and cudnnAddTensor
// ------------- cudnn conv forward and bias add ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnConvolutionForward(handle,
&alpha,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size_in_bytes,
&beta,
cudnn_output_desc,
output_data));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnAddTensor(handle,
&alpha,
cudnn_bias_desc,
bias_data,
&alpha,
cudnn_output_desc,
output_data));
} else {
if (activation == "identity") {
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
// ------------------- cudnn conv+bias+act forward --------------------
ScalingParamType<T> alpha1 = 1.0f;
ScalingParamType<T> alpha2 = residual ? 1.0f : 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnConvolutionBiasActivationForward(
handle,
&alpha1,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size_in_bytes,
&alpha2,
cudnn_output_desc,
residual_data,
cudnn_bias_desc,
bias_data,
cudnn_act_desc,
cudnn_output_desc,
output_data));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
#endif
std::vector<int> channels = ctx.Attr<std::vector<int>>("split_channels");
if (channels.size()) {
auto outs = ctx.MultiOutput<phi::DenseTensor>("Outputs");
if (x_dims[0] == 1) {
// share data with Output
phi::DenseTensor t;
t.ShareDataWith(*output);
auto y_dims = output->dims();
t.Resize({y_dims[1], y_dims[2], y_dims[3]});
int s = 0;
for (size_t i = 0; i < channels.size(); ++i) {
int e = s + channels[i];
outs[i]->ShareDataWith(t.Slice(s, e));
outs[i]->Resize({x_dims[0], channels[i], y_dims[2], y_dims[3]});
s = e;
}
} else {
// TODO(qingiqng): do copy when batch size large than 1
PADDLE_THROW(platform::errors::Unimplemented(
"Input with batch size greater than 1 is unsupported. The received "
"batch size is %d, Input's shape is [%s].",
x_dims[0],
phi::make_ddim(x_dims)));
}
}
}
};
#endif
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
#if CUDNN_VERSION >= 7100
REGISTER_OP_CUDA_KERNEL(
conv2d_fusion,
ops::CUDNNConvFusionOpKernel<float>,
ops::CUDNNConvFusionOpKernel<double>,
ops::CUDNNConvFusionOpKernel<paddle::platform::float16>);
#endif
#ifdef PADDLE_WITH_HIP
REGISTER_OP_CUDA_KERNEL(conv2d_fusion, ops::CUDNNConvFusionOpKernel<float>);
#endif
|
cc34f5838d1fef90af9dc1793f2f0d7789ef7ba7.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <array>
#include "paddle/fluid/framework/conv_search_cache.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/kernels/funcs/padding.h"
DECLARE_int64(cudnn_exhaustive_search_times);
namespace paddle {
namespace operators {
#if PADDLE_WITH_HIP || CUDNN_VERSION >= 7100
using Tensor = phi::DenseTensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using ScopedActivationDescriptor = platform::ScopedActivationDescriptor;
using DataLayout = platform::DataLayout;
using framework::AlgorithmsCache;
using framework::ConvSearchCache;
using framework::SearchFuseResult;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
template <typename T>
class CUDNNConvFusionOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto* input = ctx.Input<phi::DenseTensor>("Input");
auto* filter = ctx.Input<phi::DenseTensor>("Filter");
auto* bias = ctx.Input<phi::DenseTensor>("Bias");
auto* residual = ctx.Input<phi::DenseTensor>("ResidualData");
auto* output = ctx.Output<phi::DenseTensor>("Output");
dev_ctx.template Alloc<T>(output, output->numel() * sizeof(T));
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
const std::string activation = ctx.Attr<std::string>("activation");
int groups = ctx.Attr<int>("groups");
int64_t user_workspace_size =
static_cast<size_t>(ctx.Attr<int>("workspace_size_MB"));
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
const T* filter_data = filter->data<T>();
const T* bias_data = bias->data<T>();
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
Tensor transformed_input_channel(input->dtype());
Tensor transformed_output(output->dtype());
transformed_input_channel = *input;
transformed_output = *output;
T* output_data = transformed_output.data<T>();
const T* residual_data = residual ? residual->data<T>() : output_data;
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
phi::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(phi::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
transformed_input =
ctx.AllocateTmpTensor<T, phi::GPUContext>(new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
phi::funcs::PadFunction<phi::GPUContext, T, 4>(
dev_ctx,
input_pad,
transformed_input_channel,
pad_value,
&transformed_input);
} break;
case 5: {
phi::funcs::PadFunction<phi::GPUContext, T, 5>(
dev_ctx,
input_pad,
transformed_input_channel,
pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::PermissionDenied(
"Operator Conv2DFusion expects Input to be a 4-D or 5-D Tensor. "
"But received the actual dimension = %d, shape = [%s].",
rank,
transformed_input_channel.dims()));
}
} else {
transformed_input = transformed_input_channel;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedFilterDescriptor filter_desc;
ScopedTensorDescriptor bias_desc;
ScopedConvolutionDescriptor conv_desc;
ScopedActivationDescriptor act_desc;
DataLayout layout = DataLayout::kNCHW;
if (input->dims().size() == 5) {
layout = DataLayout::kNCDHW;
}
#ifdef PADDLE_WITH_HIP
miopenConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenSetConvolutionGroupCount(cudnn_conv_desc,
groups));
  // Currently only NCHW is supported
std::vector<int> bias_dim = {
1, static_cast<int>(transformed_output.dims()[1]), 1, 1};
miopenTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_input.dims()));
miopenTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_output.dims()));
miopenTensorDescriptor_t cudnn_filter_desc =
filter_desc.descriptor<T>(layout, phi::vectorize<int>(filter->dims()));
miopenTensorDescriptor_t cudnn_bias_desc =
bias_desc.descriptor<T>(layout, bias_dim);
miopenActivationDescriptor_t cudnn_act_desc =
act_desc.descriptor<T>(activation);
miopenConvFwdAlgorithm_t algo;
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
auto x_dims = phi::vectorize(transformed_input.dims());
auto f_dims = phi::vectorize(filter->dims());
size_t workspace_size = 0;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenConvolutionForwardGetWorkSpaceSize(
handle,
cudnn_filter_desc,
cudnn_input_desc,
cudnn_conv_desc,
cudnn_output_desc,
&workspace_size));
int find_count;
miopenConvAlgoPerf_t find_result;
auto cudnn_find_func = [&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenFindConvolutionForwardAlgorithm(
handle,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
cudnn_output_desc,
output_data,
kNUM_CUDNN_FWD_ALGS,
&find_count,
&find_result,
cudnn_workspace_ptr,
workspace_size,
false));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size);
algo = find_result.fwd_algo;
VLOG(3) << "cuDNN forward algo " << algo;
{
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenConvolutionForward(handle,
&alpha,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
algo,
&beta,
cudnn_output_desc,
output_data,
cudnn_workspace,
workspace_size));
};
workspace_handle.RunFunc(cudnn_func, workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenConvolutionForwardBias(handle,
&alpha,
cudnn_bias_desc,
bias_data,
&beta,
cudnn_output_desc,
output_data));
if (activation != "identity") {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenActivationForward(handle,
cudnn_act_desc,
&alpha,
cudnn_output_desc,
output_data,
&beta,
cudnn_output_desc,
output_data));
}
if (residual) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::miopenOpTensor(handle,
miopenTensorOpAdd,
&alpha,
cudnn_output_desc,
output_data,
&alpha,
cudnn_output_desc,
residual_data,
&beta,
cudnn_output_desc,
output_data));
}
}
#else // PADDLE_WITH_HIP
cudnnConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionGroupCount(
cudnn_conv_desc, groups));
cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_input.dims()));
cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
layout, phi::vectorize<int>(transformed_output.dims()));
cudnnFilterDescriptor_t cudnn_filter_desc =
filter_desc.descriptor<T>(layout, phi::vectorize<int>(filter->dims()));
  // Currently only NCHW is supported
std::vector<int> bias_dim = {
1, static_cast<int>(transformed_output.dims()[1]), 1, 1};
cudnnTensorDescriptor_t cudnn_bias_desc =
bias_desc.descriptor<T>(layout, bias_dim);
cudnnActivationDescriptor_t cudnn_act_desc =
act_desc.descriptor<T>(activation);
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size_in_bytes; // final workspace to allocate.
size_t workspace_size_limit = 0;
if (FLAGS_conv_workspace_size_limit > 0 || user_workspace_size > 0) {
int64_t max_user_size =
std::min(static_cast<int64_t>(FLAGS_conv_workspace_size_limit),
user_workspace_size);
workspace_size_limit = max_user_size * 1024 * 1024;
}
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionFwdAlgo_t algo;
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
auto dtype = platform::CudnnDataType<T>::type;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType(
cudnn_conv_desc, CUDNN_DEFAULT_MATH));
if (dtype == CUDNN_DATA_HALF) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType(
cudnn_conv_desc, CUDNN_TENSOR_OP_MATH));
}
#if CUDA_VERSION >= 11000 && CUDNN_VERSION >= 8000
if (!platform::allow_tf32_cudnn) {
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnSetConvolutionMathType(
cudnn_conv_desc, CUDNN_FMA_MATH));
}
#endif // CUDA_VERSION >= 11000 && CUDNN_VERSION >= 8000
auto x_dims = phi::vectorize(transformed_input.dims());
auto f_dims = phi::vectorize(filter->dims());
if (!exhaustive_search) {
#if CUDNN_VERSION >= 8000
int perf_count;
int best_algo_idx = 0;
size_t tmp_size = 0;
std::unique_ptr<cudnnConvolutionFwdAlgoPerf_t[]> perf_results(
new cudnnConvolutionFwdAlgoPerf_t[kNUM_CUDNN_FWD_ALGS]);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm_v7(
handle,
cudnn_input_desc,
cudnn_filter_desc,
cudnn_conv_desc,
cudnn_output_desc,
kNUM_CUDNN_FWD_ALGS,
&perf_count,
perf_results.get()));
algo = (perf_results.get())[best_algo_idx].algo;
#else
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm(
handle,
cudnn_input_desc,
cudnn_filter_desc,
cudnn_conv_desc,
cudnn_output_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit,
&algo));
#endif
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle,
cudnn_input_desc,
cudnn_filter_desc,
cudnn_conv_desc,
cudnn_output_desc,
algo,
&workspace_size_in_bytes));
if (workspace_size_in_bytes > workspace_size_limit)
workspace_size_limit = workspace_size_in_bytes;
VLOG(3) << "cuDNN forward algo " << algo;
} else {
std::function<SearchFuseResult<cudnnConvolutionFwdAlgo_t>()> search_func =
[&]() -> SearchFuseResult<cudnnConvolutionFwdAlgo_t> {
int returned_algo_count;
SearchFuseResult<cudnnConvolutionFwdAlgo_t> fwd_result;
std::array<cudnnConvolutionFwdAlgoPerf_t, kNUM_CUDNN_FWD_ALGS>
fwd_perf_stat;
auto cudnn_find_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnFindConvolutionForwardAlgorithmEx(
handle,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
cudnn_output_desc,
output_data,
kNUM_CUDNN_FWD_ALGS,
&returned_algo_count,
fwd_perf_stat.data(),
cudnn_workspace,
workspace_size_limit));
};
workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit);
VLOG(3) << "Perf result: (algo: stat, time, memory)";
for (int i = 0; i < returned_algo_count; ++i) {
const auto& stat = fwd_perf_stat[i];
VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time << " "
<< stat.memory;
}
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle,
cudnn_input_desc,
cudnn_filter_desc,
cudnn_conv_desc,
cudnn_output_desc,
fwd_perf_stat[0].algo,
&workspace_size_in_bytes));
// PADDLE_ENFORCE_LE(
// workspace_size_in_bytes,
// workspace_size_limit,
// platform::errors::InvalidArgument(
// "The actual workspace size to be allocated for cuDNN is
// expected " "to be less than the limit. But received: the
// actual workspace " "size = %d, limit = %d.",
// workspace_size_in_bytes,
// workspace_size_limit));
fwd_result.algo = fwd_perf_stat[0].algo;
fwd_result.workspace_size = workspace_size_in_bytes;
return fwd_result;
};
AlgorithmsCache<SearchFuseResult<cudnnConvolutionFwdAlgo_t>>& algo_cache =
*(framework::ConvSearchCache::Instance().GetConvFusion());
int search_times = ctx.Attr<int>("search_times");
SearchFuseResult<cudnnConvolutionFwdAlgo_t> algo_result;
search_times = std::max(
static_cast<int>(FLAGS_cudnn_exhaustive_search_times), search_times);
// TODO(dangqingqing): Unify this if-else.
if (search_times > 0) {
        // The searched algo is cached for up to `search_times` different input
        // sizes. For other input sizes, the algo cached for the closest area
        // (H * W) is selected.
algo_result = algo_cache.GetAlgorithm(
x_dims[2] * x_dims[3], search_times, 0, search_func);
algo = algo_result.algo;
workspace_size_in_bytes = algo_result.workspace_size;
} else {
algo_result = algo_cache.GetAlgorithm(x_dims,
f_dims,
strides,
paddings,
dilations,
0,
dtype,
search_func);
algo = algo_result.algo;
workspace_size_in_bytes = algo_result.workspace_size;
}
VLOG(3) << "choose algo " << algo;
}
if ((activation == "identity") && (!residual)) {
      // cuDNN only enables the CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM
      // algo together with CUDNN_ACTIVATION_IDENTITY, but in some tested cases it
      // is slower, so use cudnnConvolutionForward plus cudnnAddTensor instead.
// ------------- cudnn conv forward and bias add ---------------------
ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnConvolutionForward(handle,
&alpha,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size_in_bytes,
&beta,
cudnn_output_desc,
output_data));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnAddTensor(handle,
&alpha,
cudnn_bias_desc,
bias_data,
&alpha,
cudnn_output_desc,
output_data));
} else {
if (activation == "identity") {
algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
}
// ------------------- cudnn conv+bias+act forward --------------------
ScalingParamType<T> alpha1 = 1.0f;
ScalingParamType<T> alpha2 = residual ? 1.0f : 0.0f;
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cudnnConvolutionBiasActivationForward(
handle,
&alpha1,
cudnn_input_desc,
input_data,
cudnn_filter_desc,
filter_data,
cudnn_conv_desc,
algo,
cudnn_workspace,
workspace_size_in_bytes,
&alpha2,
cudnn_output_desc,
residual_data,
cudnn_bias_desc,
bias_data,
cudnn_act_desc,
cudnn_output_desc,
output_data));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
#endif
std::vector<int> channels = ctx.Attr<std::vector<int>>("split_channels");
if (channels.size()) {
auto outs = ctx.MultiOutput<phi::DenseTensor>("Outputs");
if (x_dims[0] == 1) {
// share data with Output
phi::DenseTensor t;
t.ShareDataWith(*output);
auto y_dims = output->dims();
t.Resize({y_dims[1], y_dims[2], y_dims[3]});
int s = 0;
for (size_t i = 0; i < channels.size(); ++i) {
int e = s + channels[i];
outs[i]->ShareDataWith(t.Slice(s, e));
outs[i]->Resize({x_dims[0], channels[i], y_dims[2], y_dims[3]});
s = e;
}
} else {
      // TODO(qingiqng): do copy when batch size is larger than 1
PADDLE_THROW(platform::errors::Unimplemented(
"Input with batch size greater than 1 is unsupported. The received "
"batch size is %d, Input's shape is [%s].",
x_dims[0],
phi::make_ddim(x_dims)));
}
}
}
};
#endif
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
#if CUDNN_VERSION >= 7100
REGISTER_OP_CUDA_KERNEL(
conv2d_fusion,
ops::CUDNNConvFusionOpKernel<float>,
ops::CUDNNConvFusionOpKernel<double>,
ops::CUDNNConvFusionOpKernel<paddle::platform::float16>);
#endif
#ifdef PADDLE_WITH_HIP
REGISTER_OP_CUDA_KERNEL(conv2d_fusion, ops::CUDNNConvFusionOpKernel<float>);
#endif
|
981344b5bedfba4d033644e137a53f67497bed13.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <THH/THHAtomics.cuh>
using at::Tensor;
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long int) * 8;
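// Returns true if the IoU of two boxes, each given as [x1, y1, x2, y2], exceeds the threshold.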
__device__ inline bool devIoU(float const *const a, float const *const b, const float threshold)
{
float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
float width = fmaxf(right - left, 0.f),
height = fmaxf(bottom - top, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0]) * (a[3] - a[1]);
float Sb = (b[2] - b[0]) * (b[3] - b[1]);
return interS > threshold * (Sa + Sb - interS);
}
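// Each block compares a 64-box row chunk against a 64-box column chunk; every thread owns one
// row box and records, in a 64-bit mask, which column boxes overlap it above the IoU threshold.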
__global__ void nms_cuda(const int n_boxes, const float iou_threshold, const float *dev_boxes, unsigned long long *dev_mask)
{
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int tid = threadIdx.x;
if (row_start > col_start) return;
const int row_size = fminf(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size = fminf(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 4];
if (tid < col_size)
{
block_boxes[tid * 4 + 0] = dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 0];
block_boxes[tid * 4 + 1] = dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 1];
block_boxes[tid * 4 + 2] = dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 2];
block_boxes[tid * 4 + 3] = dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 3];
}
__syncthreads();
if (tid < row_size)
{
const int cur_box_idx = threadsPerBlock * row_start + tid;
const float *cur_box = dev_boxes + cur_box_idx * 4;
int i = 0;
unsigned long long int t = 0;
int start = 0;
if (row_start == col_start)
{
start = tid + 1;
}
for (i = start; i < col_size; i++)
{
if (devIoU(cur_box, block_boxes + i * 4, iou_threshold))
{
t |= 1ULL << i;
}
}
dev_mask[cur_box_idx * gridDim.y + col_start] = t;
}
}
Tensor NMSCUDAKernelLauncher(Tensor boxes, Tensor scores, float iou_threshold)
{
if (boxes.numel() == 0)
{
return at::empty({0}, boxes.options().dtype(at::kLong));
}
auto order_t = std::get<1>(scores.sort(0, /*descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
Tensor mask = at::empty({boxes_num, col_blocks}, boxes.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( nms_cuda), dim3(blocks), dim3(threads), 0, stream, boxes_num, iou_threshold, boxes_sorted.data_ptr<float>(), (unsigned long long*)mask.data_ptr<int64_t>());
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep_t = at::zeros({boxes_num}, boxes.options().dtype(at::kBool).device(at::kCPU));
bool* keep = keep_t.data_ptr<bool>();
for (int i = 0; i < boxes_num; i++)
{
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock)))
{
keep[i] = true;
            // box i is kept: mark every later box it overlaps (the bits in its mask row)
            // as suppressed in remv
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++)
{
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(hipGetLastError());
return order_t.masked_select(keep_t.to(at::kCUDA));
}
|
981344b5bedfba4d033644e137a53f67497bed13.cu
|
#include <cuda.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <THC/THCAtomics.cuh>
using at::Tensor;
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long int) * 8;
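// Returns true if the IoU of two boxes, each given as [x1, y1, x2, y2], exceeds the threshold.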
__device__ inline bool devIoU(float const *const a, float const *const b, const float threshold)
{
float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
float width = fmaxf(right - left, 0.f),
height = fmaxf(bottom - top, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0]) * (a[3] - a[1]);
float Sb = (b[2] - b[0]) * (b[3] - b[1]);
return interS > threshold * (Sa + Sb - interS);
}
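// Each block compares a 64-box row chunk against a 64-box column chunk; every thread owns one
// row box and records, in a 64-bit mask, which column boxes overlap it above the IoU threshold.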
__global__ void nms_cuda(const int n_boxes, const float iou_threshold, const float *dev_boxes, unsigned long long *dev_mask)
{
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int tid = threadIdx.x;
if (row_start > col_start) return;
const int row_size = fminf(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size = fminf(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 4];
if (tid < col_size)
{
block_boxes[tid * 4 + 0] = dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 0];
block_boxes[tid * 4 + 1] = dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 1];
block_boxes[tid * 4 + 2] = dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 2];
block_boxes[tid * 4 + 3] = dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 3];
}
__syncthreads();
if (tid < row_size)
{
const int cur_box_idx = threadsPerBlock * row_start + tid;
const float *cur_box = dev_boxes + cur_box_idx * 4;
int i = 0;
unsigned long long int t = 0;
int start = 0;
if (row_start == col_start)
{
start = tid + 1;
}
for (i = start; i < col_size; i++)
{
if (devIoU(cur_box, block_boxes + i * 4, iou_threshold))
{
t |= 1ULL << i;
}
}
dev_mask[cur_box_idx * gridDim.y + col_start] = t;
}
}
Tensor NMSCUDAKernelLauncher(Tensor boxes, Tensor scores, float iou_threshold)
{
if (boxes.numel() == 0)
{
return at::empty({0}, boxes.options().dtype(at::kLong));
}
auto order_t = std::get<1>(scores.sort(0, /*descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
Tensor mask = at::empty({boxes_num, col_blocks}, boxes.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
nms_cuda<<<blocks, threads, 0, stream>>>(boxes_num, iou_threshold, boxes_sorted.data_ptr<float>(), (unsigned long long*)mask.data_ptr<int64_t>());
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep_t = at::zeros({boxes_num}, boxes.options().dtype(at::kBool).device(at::kCPU));
bool* keep = keep_t.data_ptr<bool>();
for (int i = 0; i < boxes_num; i++)
{
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock)))
{
keep[i] = true;
            // box i is kept: mark every later box it overlaps (the bits in its mask row)
            // as suppressed in remv
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++)
{
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(cudaGetLastError());
return order_t.masked_select(keep_t.to(at::kCUDA));
}
|
e04c71e24326c7cc9c37e5b425f862d32e4e5885.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "kernel/gpu/cuda_impl/concatv2_impl.cuh"
template <typename T>
__global__ void Concat(const size_t size, const int w1, const int w2, const T* input_1, const T* input_2, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int n = pos / (w1 + w2);
int m = pos % (w1 + w2);
output[pos] = m >= w1 ? input_2[n * w2 + m - w1] : input_1[n * w1 + m];
}
return;
}
template <typename T>
__global__ void Concat(const size_t size, const int w1, const int w2, const int w3,
const T* input_1, const T* input_2, const T* input_3, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int n = pos / (w1 + w2 + w3);
int m = pos % (w1 + w2 + w3);
output[pos] = m < w1 ? input_1[n * w1 + m] :
m < w1 + w2 ? input_2[n * w2 + m - w1] :
input_3[n * w3 + m - w1 - w2];
}
return;
}
template <typename T>
__global__ void Concat(const size_t size, const int w1, const int w2, const int w3, const int w4,
const T* input_1, const T* input_2, const T* input_3, const T* input_4, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int n = pos / (w1 + w2 + w3 + w4);
int m = pos % (w1 + w2 + w3 + w4);
output[pos] = m < w1 ? input_1[n * w1 + m] :
m < w1 + w2 ? input_2[n * w2 + m - w1]:
m < w1 + w2 + w3 ? input_3[n * w3 + m - w1 - w2]:
input_4[n * w4 + m - w1 - w2 - w3];
}
return;
}
template <typename T>
void ConcatKernel(const size_t size, const int w1, const int w2, const T* input_1, const T* input_2, T* output,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( Concat), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, w1, w2, input_1, input_2, output);
return;
}
template <typename T>
void ConcatKernel(const size_t size, const int w1, const int w2, const int w3,
const T* input_1, const T* input_2, const T* input_3, T* output,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( Concat), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, w1, w2, w3, input_1, input_2, input_3, output);
return;
}
template <typename T>
void ConcatKernel(const size_t size, const int w1, const int w2, const int w3, const int w4,
const T* input_1, const T* input_2, const T* input_3, const T* input_4, T* output,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( Concat), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, w1, w2, w3, w4, input_1,
input_2, input_3, input_4, output);
return;
}
template void ConcatKernel(const size_t size, const int w1, const int w2, const float* input_1, const float* input_2,
float* output, hipStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int* input_1, const int* input_2,
int* output, hipStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const half* input_1, const half* input_2,
half* output, hipStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3,
const float* input_1, const float* input_2, const float* input_3,
float* output, hipStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3,
const int* input_1, const int* input_2, const int* input_3,
int* output, hipStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3,
const half* input_1, const half* input_2, const half* input_3,
half* output, hipStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3, const int w4,
const float* input_1, const float* input_2, const float* input_3, const float* input_4,
float* output, hipStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3, const int w4,
const int* input_1, const int* input_2, const int* input_3, const int* input_4,
int* output, hipStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3, const int w4,
const half* input_1, const half* input_2, const half* input_3, const half* input_4,
half* output, hipStream_t cuda_stream);
|
e04c71e24326c7cc9c37e5b425f862d32e4e5885.cu
|
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include "kernel/gpu/cuda_impl/concatv2_impl.cuh"
template <typename T>
__global__ void Concat(const size_t size, const int w1, const int w2, const T* input_1, const T* input_2, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int n = pos / (w1 + w2);
int m = pos % (w1 + w2);
output[pos] = m >= w1 ? input_2[n * w2 + m - w1] : input_1[n * w1 + m];
}
return;
}
template <typename T>
__global__ void Concat(const size_t size, const int w1, const int w2, const int w3,
const T* input_1, const T* input_2, const T* input_3, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int n = pos / (w1 + w2 + w3);
int m = pos % (w1 + w2 + w3);
output[pos] = m < w1 ? input_1[n * w1 + m] :
m < w1 + w2 ? input_2[n * w2 + m - w1] :
input_3[n * w3 + m - w1 - w2];
}
return;
}
template <typename T>
__global__ void Concat(const size_t size, const int w1, const int w2, const int w3, const int w4,
const T* input_1, const T* input_2, const T* input_3, const T* input_4, T* output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int n = pos / (w1 + w2 + w3 + w4);
int m = pos % (w1 + w2 + w3 + w4);
output[pos] = m < w1 ? input_1[n * w1 + m] :
m < w1 + w2 ? input_2[n * w2 + m - w1]:
m < w1 + w2 + w3 ? input_3[n * w3 + m - w1 - w2]:
input_4[n * w4 + m - w1 - w2 - w3];
}
return;
}
template <typename T>
void ConcatKernel(const size_t size, const int w1, const int w2, const T* input_1, const T* input_2, T* output,
cudaStream_t cuda_stream) {
Concat<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, w1, w2, input_1, input_2, output);
return;
}
template <typename T>
void ConcatKernel(const size_t size, const int w1, const int w2, const int w3,
const T* input_1, const T* input_2, const T* input_3, T* output,
cudaStream_t cuda_stream) {
Concat<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, w1, w2, w3, input_1, input_2, input_3, output);
return;
}
template <typename T>
void ConcatKernel(const size_t size, const int w1, const int w2, const int w3, const int w4,
const T* input_1, const T* input_2, const T* input_3, const T* input_4, T* output,
cudaStream_t cuda_stream) {
Concat<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, w1, w2, w3, w4, input_1,
input_2, input_3, input_4, output);
return;
}
template void ConcatKernel(const size_t size, const int w1, const int w2, const float* input_1, const float* input_2,
float* output, cudaStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int* input_1, const int* input_2,
int* output, cudaStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const half* input_1, const half* input_2,
half* output, cudaStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3,
const float* input_1, const float* input_2, const float* input_3,
float* output, cudaStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3,
const int* input_1, const int* input_2, const int* input_3,
int* output, cudaStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3,
const half* input_1, const half* input_2, const half* input_3,
half* output, cudaStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3, const int w4,
const float* input_1, const float* input_2, const float* input_3, const float* input_4,
float* output, cudaStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3, const int w4,
const int* input_1, const int* input_2, const int* input_3, const int* input_4,
int* output, cudaStream_t cuda_stream);
template void ConcatKernel(const size_t size, const int w1, const int w2, const int w3, const int w4,
const half* input_1, const half* input_2, const half* input_3, const half* input_4,
half* output, cudaStream_t cuda_stream);
|
9aa23e47f6e317e732f53323eebff7c74b7634df.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cfloat>
#include "common.hpp"
#include "cumsum.hpp"
#define BLOCKSIZE 1024
// compare function for sort
template <typename idxT, typename T>
struct CompareSegmentGT {
CompareSegmentGT(int64_t segment_size): seg_size(segment_size) {}
__device__ bool operator()(const thrust::tuple<idxT, T, T> &lv, const thrust::tuple<idxT, T, T> &rv) {
idxT segl = thrust::get<0>(lv) / seg_size;
idxT segr = thrust::get<0>(rv) / seg_size;
if (segl == segr) {
return thrust::get<1>(lv) > thrust::get<1>(rv);
} else {
return segl < segr;
}
}
const int64_t seg_size;
};
// reduce function for shared memory
template<typename T>
class sum_op {
public:
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
class gt_op {
public:
__device__ __forceinline__ T operator()(T a, T b) const {
/* if (a > b) return a; */
/* else return b; */
return (a > b) ? a : b;
}
};
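// In-place tree reduction over the first `blocksize` elements of sdata (blocksize is
// assumed to be a power of two); the combined result is left in sdata[0].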
template<template<typename> class Reduction, typename scalar_t>
__device__ __forceinline__ void reduce_op(
scalar_t* sdata, int blocksize, const int tid,
const Reduction<scalar_t>& oper) {
__syncthreads();
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = oper(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
}
// kernel function for forward and backward
template<typename scalar_t>
__global__ void compute_errs(const int n_size, const int m_size,
const int ignore_index, const int64_t *labels,
scalar_t *errs, scalar_t *one_hot) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
const scalar_t one(1.);
const scalar_t minus_one(-1.);
for (int i{tid}; i < m_size; i+=stride) {
int e_ind;
    // if this is the ignore index, set the errors to -1 so they are sorted to the rear
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
for (int j = 0; j < n_size; ++j) {
e_ind = j * m_size + i;
errs[e_ind] = minus_one;
}
continue;
}
e_ind = lb * m_size + i;
// set one hot values
one_hot[e_ind] = one;
// compute errs:
// errs = abs(lb_one_hot - softmax(logits.transpose(0, 1).view(c, -1)))
// (lb_one_hot - probs).abs()
errs[e_ind] = one - errs[e_ind];
}
}
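// Lovasz extension of the Jaccard loss: `output` holds the per-class cumulative sum of the
// sorted one-hot labels and its last column the number of positives n_pos, so
// intersection = n_pos - cumsum, union = n_pos - cumsum + rank, and tmp stores 1 - IoU.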
template<typename scalar_t>
__global__ void compute_jacc_iou(scalar_t *output, scalar_t *tmp,
const int n_size, const int m_size) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
// load n_pos to shm, n_pos is the last column of cumsum
if (threadIdx.x < n_size) {
shared[threadIdx.x] = output[(threadIdx.x + 1) * m_size - 1];
}
__syncthreads();
int n_samples = n_size * m_size;
int t_size = gridDim.x * blockDim.x;
const scalar_t one(1);
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i{tid}; i < n_samples; i += t_size) {
int n_ind = i / m_size;
int m_ind = i % m_size;
scalar_t val = output[i];
scalar_t int_val = shared[n_ind] - val;
scalar_t uni_val = shared[n_ind] - val + scalar_t(m_ind + 1);
tmp[i] = one - int_val / uni_val;
}
}
template<typename scalar_t>
__global__ void compute_jacc_diff(scalar_t *errs, scalar_t *output,
scalar_t *tmp, const int *index,
const int n_size, const int m_size) {
int n_samples = n_size * m_size;
int t_size = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i{tid}; i < n_samples; i += t_size) {
int m_ind = i % m_size;
scalar_t val;
if (m_ind == 0) {
val = tmp[i];
} else {
val = tmp[i] - tmp[i - 1];
}
int ind = index[i];
output[ind] = val;
}
}
template<typename scalar_t>
__global__ void reorder_errs(const scalar_t *errs,
scalar_t *tmp, const int *index,
const int n_size, const int m_size) {
int n_samples = n_size * m_size;
int t_size = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i{tid}; i < n_samples; i += t_size) {
tmp[index[i]] = errs[i];
}
}
template<typename scalar_t>
__global__ void reorder_copy_back(scalar_t *errs, const scalar_t *tmp,
const int n_size, const int m_size) {
int n_samples = n_size * m_size;
int t_size = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i{tid}; i < n_samples; i += t_size) {
errs[i] = tmp[i];
}
}
template<typename scalar_t>
__global__ void mul_reduce_sum_by_row_per_block(scalar_t *errs,
const scalar_t *jacc, scalar_t *buf,
const int n_size, const int m_size) {
const scalar_t zero(0);
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
int bid = blockIdx.y;
int b_size = gridDim.y;
int tstride = blockDim.x * gridDim.x;
for (int i{bid}; i < n_size; i += b_size) {
shared[threadIdx.x] = zero;
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int j{tid}; j < m_size; j += tstride) {
int ind = m_size * i + j;
scalar_t err_val = errs[ind];
if (err_val < zero) err_val = zero; // bypass ignore index
shared[threadIdx.x] += err_val * jacc[ind];
}
__syncthreads();
reduce_op<sum_op, scalar_t>(shared, blockDim.x, threadIdx.x, sum_op<scalar_t>());
if (threadIdx.x == 0) {
int ind = i * gridDim.x + blockIdx.x;
buf[ind] = shared[0];
}
}
}
template<typename scalar_t>
__global__ void reduce_sum_by_row(const scalar_t *buf, scalar_t *loss ,
const int n_size, const int m_size) {
const scalar_t zero(0);
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
int bid = blockIdx.y;
int bstrd = gridDim.y;
for (int i{bid}; i < n_size; i += bstrd) {
shared[threadIdx.x] = zero;
__syncthreads();
int tid = threadIdx.x;
int tstrd = blockDim.x;
for (int j{tid}; j < m_size; j += tstrd) {
int ind = m_size * i + j;
shared[threadIdx.x] += buf[ind];
}
__syncthreads();
reduce_op<sum_op, scalar_t>(shared, blockDim.x, threadIdx.x, sum_op<scalar_t>());
if (threadIdx.x == 0) {
loss[i] = shared[0];
}
}
}
template<typename scalar_t>
__global__ void compute_probs_grad_and_transpose(const scalar_t *jacc,
const scalar_t *grad, scalar_t *grad_logits,
const int64_t *labels, const int n_size,
const int dimsize, const int m_size) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
const int samplesize = n_size * dimsize * m_size;
const int dm_size = dimsize * m_size;
// read to shared memory to save bandwidth
if (threadIdx.x < dimsize) {
shared[threadIdx.x] = grad[threadIdx.x];
}
__syncthreads();
int e_ind;
for (int i{tid}; i < samplesize; i += stride) {
int n_ind = i / dm_size;
int d_ind = i % dm_size;
int m_ind = d_ind % m_size;
d_ind = d_ind / m_size;
e_ind = n_ind * m_size + m_ind;
int lb = static_cast<int>(labels[e_ind]);
int e_ind = d_ind * n_size * m_size + n_ind * m_size + m_ind;
// grad = -1 if j == lb else 1
if (lb == d_ind) {
grad_logits[i] = - jacc[e_ind] * shared[d_ind];
} else {
grad_logits[i] = jacc[e_ind] * shared[d_ind];
}
}
}
template<typename scalar_t>
__global__ void compute_softmax_shallow(const int n_size, const int dimsize,
const int m_size, const scalar_t *logits,
scalar_t *softmax) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int n_samples = m_size * n_size;
const scalar_t one(1.);
for (int i{tid}; i < n_samples; i+=stride) {
int n_idx = i / m_size;
int m_idx = i % m_size;
int e_idx;
// find max val
scalar_t max_val(-10000.);
for (int j{0}; j < dimsize; ++j) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[e_idx];
if (val > max_val) max_val = val;
}
// compute exp sum
scalar_t exp_sum_val(0.);
for (int j{0}; j < dimsize; ++j) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[e_idx];
exp_sum_val += math_ops::Exp(val - max_val);
}
exp_sum_val = one / exp_sum_val;
// compute softmax
for (int j{0}; j < dimsize; ++j) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[e_idx];
softmax[e_idx] = math_ops::Exp(val - max_val) * exp_sum_val;
}
}
}
template<typename scalar_t>
__global__ void compute_softmax_deep(const int n_size, const int dimsize,
const int m_size, const scalar_t *logits,
scalar_t *softmax) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
shared += blockDim.y * threadIdx.x;
const int samplesize = n_size * m_size;
const scalar_t one(1.);
int tid = threadIdx.y;
int sid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{sid}; i < samplesize; i += stride) {
int e_idx;
int n_idx = i / m_size;
int m_idx = i % m_size;
// find max val
shared[tid] = scalar_t(-10000.);
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[e_idx];
if (val > shared[tid]) shared[tid] = val;
}
__syncthreads();
reduce_op<gt_op, scalar_t>(shared, blockDim.y, threadIdx.y, gt_op<scalar_t>());
scalar_t max_val = shared[0];
__syncthreads();
// find exp sum val
shared[tid] = scalar_t(0.);
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
shared[tid] += math_ops::Exp(logits[e_idx] - max_val);
}
__syncthreads();
reduce_op<sum_op, scalar_t>(shared, blockDim.y, threadIdx.y, sum_op<scalar_t>());
if (tid == 0) shared[0] = one / shared[0];
__syncthreads();
// compute softmax
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
softmax[e_idx] = math_ops::Exp(logits[e_idx] - max_val) * shared[0];
}
}
}
template<typename scalar_t>
__global__ void compute_logits_grad_shallow(const int n_size, const int dimsize,
const int m_size, const int ignore_index,
const scalar_t *jacc, scalar_t *grad_logits,
const int64_t *labels) {
const scalar_t zero(0.);
const int samplesize = n_size * m_size;
int sid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
// compute grad of logits, store in grad_logits, jacc is softmax
for (int i{sid}; i < samplesize; i += stride) {
int n_ind = i / m_size;
int m_ind = i % m_size;
int e_ind;
// set grad of ignored index to be 0
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
for (int j{0}; j < dimsize; ++j) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
grad_logits[e_ind] = zero;
}
continue;
}
scalar_t sum(0);
for (int j{0}; j < dimsize; ++j) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
sum -= jacc[e_ind] * grad_logits[e_ind];
}
for (int j{0}; j < dimsize; ++j) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
grad_logits[e_ind] = jacc[e_ind] * (sum + grad_logits[e_ind]);
}
}
}
template<typename scalar_t>
__global__ void compute_logits_grad_deep(const int n_size, const int dimsize,
const int m_size, const int ignore_index,
const scalar_t *jacc, scalar_t *grad_logits,
const int64_t *labels) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
const scalar_t zero(0.);
const int samplesize = n_size * m_size;
const int shm_offset = blockDim.y * threadIdx.x;
shared += shm_offset;
int tid = threadIdx.y;
int sid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
// compute grad of logits, store in grad_logits, jacc is softmax
for (int i{sid}; i < samplesize; i += stride) {
int n_ind = i / m_size;
int m_ind = i % m_size;
int e_ind;
// set grad of ignored index to be 0
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
grad_logits[e_ind] = zero;
}
continue;
}
shared[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
shared[tid] -= jacc[e_ind] * grad_logits[e_ind];
}
__syncthreads();
reduce_op<sum_op, scalar_t>(shared, blockDim.y, threadIdx.y, sum_op<scalar_t>());
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
grad_logits[e_ind] = jacc[e_ind] * (grad_logits[e_ind] + shared[0]);
}
__syncthreads();
}
}
template<typename scalar_t>
__global__ void transpose_softmax(const int n_size, const int dimsize,
const int m_size, scalar_t *from, scalar_t *to) {
const int samplesize = n_size * dimsize * m_size;
const int dm_size = dimsize * m_size;
const scalar_t zero(0.);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < samplesize; i += stride) {
int n_ind = i / dm_size;
int d_ind = i % dm_size;
int m_ind = d_ind % m_size;
d_ind = d_ind / m_size;
int e_ind = d_ind * n_size * m_size + n_ind * m_size + m_ind;
to[e_ind] = from[i];
from[i] = zero;
}
}
void LovaszComputeErrsOneHot(const at::Tensor &logits, const at::Tensor &labels,
at::Tensor &errs, at::Tensor &jacc,
const int ignore_index) {
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
int blockx, blocky, gridx;
dim3 block, grid;
if (dimsize < 32) {
block = dim3(BLOCKSIZE);
grid = dim3(::max(1, ::min(samplesize / BLOCKSIZE, 4096)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "lovasz forward softmax", [&] {
hipLaunchKernelGGL(( compute_softmax_shallow<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // store softmax
);
});
} else {
blocky = 32;
while (blocky < dimsize) blocky <<= 1;
blocky >>= 1;
blocky = ::min(::max(1, blocky), BLOCKSIZE);
blockx = BLOCKSIZE / blocky;
gridx = ::min(4096, ::max(1, samplesize / blockx));
block = dim3(blockx, blocky);
grid = dim3(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "lovasz forward softmax", [&] {
int shm_size = sizeof(scalar_t) * BLOCKSIZE;
hipLaunchKernelGGL(( compute_softmax_deep<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // store softmax
);
});
}
block = dim3(BLOCKSIZE);
grid = dim3(::max(1, ::min(samplesize * dimsize / BLOCKSIZE, 4096)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "lovasz transpose softmax", [&] {
hipLaunchKernelGGL(( transpose_softmax<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
jacc.contiguous().data_ptr<scalar_t>(), // set jacc to all 0
errs.contiguous().data_ptr<scalar_t>());
});
grid = dim3(::max(1, ::min(samplesize / BLOCKSIZE, 4096)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "lovasz forwarderrs and one hot", [&] {
hipLaunchKernelGGL(( compute_errs<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
dimsize, samplesize, ignore_index,
labels.contiguous().data_ptr<int64_t>(),
errs.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // jacc is one hot here
);
});
}
void LovaszComputeJacc(at::Tensor &errs, at::Tensor &output) {
int n_samples = errs.size(1);
int dimsize = errs.size(0);
auto tmp = at::empty_like(errs);
dim3 block(BLOCKSIZE);
dim3 grid(max(min((int)tmp.numel() / BLOCKSIZE, 4096), 1));
// sort errs, together with one hot and obtain the order index
thrust::device_vector<int> index(n_samples * dimsize);
thrust::sequence(thrust::device, index.begin(), index.end(), 0, 1);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "jacc sort", [&] {
thrust::device_ptr<scalar_t> errs_ptr(errs.data_ptr<scalar_t>());
thrust::device_ptr<scalar_t> output_ptr(output.data_ptr<scalar_t>());
auto begin = thrust::make_zip_iterator(thrust::make_tuple(
index.begin(), errs_ptr, output_ptr));
thrust::sort(
thrust::device, begin, begin + errs.numel(),
CompareSegmentGT<int, scalar_t>(n_samples));
});
// cumsum
cumsum_2d_by_row_v2(output);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "jacc forward steps", [&] {
    // compute iou and store it in the temp memory tmp; n_pos is the last column of cumsum
int shm = sizeof(scalar_t) * BLOCKSIZE;
hipLaunchKernelGGL(( compute_jacc_iou<scalar_t>), dim3(grid), dim3(block), shm, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
tmp.data_ptr<scalar_t>(),
dimsize, n_samples);
// compute iou difference from tmp and store at output, then copy errs to tmp
// to prepare for re-order of errs
hipLaunchKernelGGL(( compute_jacc_diff<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
errs.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
tmp.data_ptr<scalar_t>(),
thrust::raw_pointer_cast(&index[0]),
dimsize, n_samples);
// re-order errs and copy to tmp
hipLaunchKernelGGL(( reorder_errs<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
errs.data_ptr<scalar_t>(),
tmp.data_ptr<scalar_t>(),
thrust::raw_pointer_cast(&index[0]),
dimsize, n_samples);
// copy back from tmp to errs
hipLaunchKernelGGL(( reorder_copy_back<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
errs.data_ptr<scalar_t>(),
tmp.data_ptr<scalar_t>(),
dimsize, n_samples);
});
}
void LovaszComputeLoss(const at::Tensor &errs, const at::Tensor &jacc, const at::Tensor &loss) {
const int n_size = errs.size(0);
const int m_size = errs.size(1);
// parallel strategy
int gridy = 2;
while (gridy < n_size && gridy <= 32) gridy <<= 1;
gridy >>= 1;
gridy = ::max(1, gridy); // limit the parallel number of rows within 1 and 32
int gridx = ::max(::min(m_size / BLOCKSIZE, 4096 / gridy), 1);
dim3 block(BLOCKSIZE);
dim3 grid(gridx, gridy);
// allocate memory and cuda grid/block
auto buf = at::empty({n_size, gridx}, errs.options());
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "compute loss per block", [&] {
// multiply and reduce within each kernel
int shm = sizeof(scalar_t) * BLOCKSIZE;
hipLaunchKernelGGL(( mul_reduce_sum_by_row_per_block<scalar_t>), dim3(grid), dim3(block), shm, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
errs.data_ptr<scalar_t>(),
jacc.data_ptr<scalar_t>(),
buf.data_ptr<scalar_t>(),
n_size, m_size);
});
int blockx = 2;
while (blockx < gridx) blockx <<= 1;
if (blockx > BLOCKSIZE) blockx = BLOCKSIZE;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "compute loss reduce block", [&] {
// reduce sum among blocks
int shm = sizeof(scalar_t) * blockx;
hipLaunchKernelGGL(( reduce_sum_by_row<scalar_t>), dim3(dim3(1, gridy)), dim3(dim3(blockx)), shm, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
buf.data_ptr<scalar_t>(),
loss.data_ptr<scalar_t>(),
n_size, gridx);
});
}
/* Method */
std::tuple<at::Tensor, at::Tensor> Lovasz_softmax_forward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
AT_ASSERTM(logits.numel() < (1L << 31), "input tensor too large, int32 type will overflow");
AT_ASSERTM(logits.size(1) < BLOCKSIZE, "num of classes should be less than BLOCKSIZE");
// allocate memory and cuda grid/block
const int dimsize = logits.size(1);
auto errs = at::empty_like(logits).reshape({dimsize, -1});
auto jacc = at::empty_like(logits).reshape({dimsize, -1});
auto loss = at::empty({dimsize}, logits.options());
  if (errs.numel() == 0 || jacc.numel() == 0 || loss.numel() == 0) {
THCudaCheck(hipGetLastError());
return std::make_tuple(errs, jacc);
}
// Compute errs and one hot
LovaszComputeErrsOneHot(logits, labels, errs, jacc, ignore_index);
// compute jacc index, which is re-ordered to the original order
// so that we could re-use it in backward pass
LovaszComputeJacc(errs, jacc);
// reduce sum operation
LovaszComputeLoss(errs, jacc, loss);
THCudaCheck(hipGetLastError());
return std::make_tuple(loss, jacc);
}
at::Tensor Lovasz_softmax_backward_cuda(const at::Tensor &grad, const at::Tensor &logits,
const at::Tensor &labels, const at::Tensor jacc,
const int64_t ignore_index) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
// allocate memory and cuda grid/block
auto grad_logits = at::empty_like(logits);
// call kernel
int blockx, blocky, gridx;
dim3 block, grid;
gridx = ::max(1, ::min(samplesize * dimsize / BLOCKSIZE, 4096));
block = dim3(BLOCKSIZE);
grid = dim3(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward probs", [&] {
// compute grad of probs, just multiply to jacc
// store at grad_logits and change from dnm to ndm layout
int shm = BLOCKSIZE * sizeof(scalar_t);
hipLaunchKernelGGL(( compute_probs_grad_and_transpose<scalar_t>), dim3(grid), dim3(block), shm, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
jacc.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
n_size, dimsize, m_size);
});
// from now on, grad_probs is stored in grad_logits, softmax is on jacc
// compute grad of logits, store it grad_logits
if (dimsize < 32) {
block = dim3(BLOCKSIZE);
grid = dim3(::max(1, ::min(samplesize / BLOCKSIZE, 4096)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward logits", [&] {
hipLaunchKernelGGL(( compute_softmax_shallow<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // store softmax
);
hipLaunchKernelGGL(( compute_logits_grad_shallow<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size, ignore_index,
jacc.contiguous().data_ptr<scalar_t>(),
grad_logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>()
);
});
} else {
blocky = 32;
while (blocky < dimsize) blocky <<= 1;
blocky >>= 1;
blocky = ::min(::max(1, blocky), BLOCKSIZE);
blockx = BLOCKSIZE / blocky;
gridx = ::min(4096, ::max(1, samplesize / blockx));
block = dim3(blockx, blocky);
grid = dim3(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward logits", [&] {
int shm_size = sizeof(scalar_t) * BLOCKSIZE;
hipLaunchKernelGGL(( compute_softmax_deep<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // store softmax
);
hipLaunchKernelGGL(( compute_logits_grad_deep<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n_size, dimsize, m_size, ignore_index,
jacc.contiguous().data_ptr<scalar_t>(),
grad_logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>()
);
});
}
THCudaCheck(hipGetLastError());
return grad_logits;
}
// python inferface
std::tuple<at::Tensor, at::Tensor> Lovasz_softmax_forward(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index) {
if (logits.device().type() != c10::kCUDA) {
AT_ERROR("this lovasz softmax function only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return Lovasz_softmax_forward_cuda(logits, labels, ignore_index);
}
at::Tensor Lovasz_softmax_backward(const at::Tensor &grad, const at::Tensor &logits,
const at::Tensor &labels, at::Tensor jacc,
const int64_t ignore_index) {
// TODO: try AT_ASSERTM
if (logits.device().type() != c10::kCUDA || labels.device().type() != c10::kCUDA) {
AT_ERROR("this lovasz softmax function only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return Lovasz_softmax_backward_cuda(grad, logits, labels, jacc, ignore_index);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("lovasz_softmax_forward", &Lovasz_softmax_forward, "lovasz softmax forward");
m.def("lovasz_softmax_backward", &Lovasz_softmax_backward, "lovasz softmax backward");
}
|
9aa23e47f6e317e732f53323eebff7c74b7634df.cu
|
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#include "common.hpp"
#include "cumsum.hpp"
#define BLOCKSIZE 1024
// compare function for sort
template <typename idxT, typename T>
struct CompareSegmentGT {
CompareSegmentGT(int64_t segment_size): seg_size(segment_size) {}
__device__ bool operator()(const thrust::tuple<idxT, T, T> &lv, const thrust::tuple<idxT, T, T> &rv) {
idxT segl = thrust::get<0>(lv) / seg_size;
idxT segr = thrust::get<0>(rv) / seg_size;
if (segl == segr) {
return thrust::get<1>(lv) > thrust::get<1>(rv);
} else {
return segl < segr;
}
}
const int64_t seg_size;
};
// reduce function for shared memory
template<typename T>
class sum_op {
public:
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
class gt_op {
public:
__device__ __forceinline__ T operator()(T a, T b) const {
/* if (a > b) return a; */
/* else return b; */
return (a > b) ? a : b;
}
};
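// In-place tree reduction over the first `blocksize` elements of sdata (blocksize is
// assumed to be a power of two); the combined result is left in sdata[0].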
template<template<typename> class Reduction, typename scalar_t>
__device__ __forceinline__ void reduce_op(
scalar_t* sdata, int blocksize, const int tid,
const Reduction<scalar_t>& oper) {
__syncthreads();
for (int s{blocksize / 2}; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] = oper(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
}
// kernel function for forward and backward
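// compute_errs: per-pixel setup for the Lovasz extension. On entry errs holds
// the transposed softmax probabilities; this kernel builds the one-hot label
// matrix (into one_hot) and turns errs into |one_hot - softmax|. Pixels with
// the ignore label get errs = -1 so the later descending sort sends them to
// the rear of each class row.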
template<typename scalar_t>
__global__ void compute_errs(const int n_size, const int m_size,
const int ignore_index, const int64_t *labels,
scalar_t *errs, scalar_t *one_hot) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
const scalar_t one(1.);
const scalar_t minus_one(-1.);
for (int i{tid}; i < m_size; i+=stride) {
int e_ind;
// for ignored pixels set errs to -1, so the descending sort sends them to the rear
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
for (int j = 0; j < n_size; ++j) {
e_ind = j * m_size + i;
errs[e_ind] = minus_one;
}
continue;
}
e_ind = lb * m_size + i;
// set one hot values
one_hot[e_ind] = one;
// compute errs:
// errs = abs(lb_one_hot - softmax(logits.transpose(0, 1).view(c, -1)))
// (lb_one_hot - probs).abs()
errs[e_ind] = one - errs[e_ind];
}
}
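// compute_jacc_iou: given the per-class cumulative sum of the sorted one-hot
// labels (in output, with the class totals n_pos in its last column), write
// the prefix Jaccard losses 1 - intersection/union into tmp, where
// intersection = n_pos - cumsum and union = n_pos - cumsum + (position + 1).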
template<typename scalar_t>
__global__ void compute_jacc_iou(scalar_t *output, scalar_t *tmp,
const int n_size, const int m_size) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
// load n_pos to shm, n_pos is the last column of cumsum
if (threadIdx.x < n_size) {
shared[threadIdx.x] = output[(threadIdx.x + 1) * m_size - 1];
}
__syncthreads();
int n_samples = n_size * m_size;
int t_size = gridDim.x * blockDim.x;
const scalar_t one(1);
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i{tid}; i < n_samples; i += t_size) {
int n_ind = i / m_size;
int m_ind = i % m_size;
scalar_t val = output[i];
scalar_t int_val = shared[n_ind] - val;
scalar_t uni_val = shared[n_ind] - val + scalar_t(m_ind + 1);
tmp[i] = one - int_val / uni_val;
}
}
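// compute_jacc_diff: differentiate the prefix Jaccard losses along each sorted
// class row (these differences are the Lovasz gradient weights) and scatter
// the result back to the original, pre-sort element positions via index.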
template<typename scalar_t>
__global__ void compute_jacc_diff(scalar_t *errs, scalar_t *output,
scalar_t *tmp, const int *index,
const int n_size, const int m_size) {
int n_samples = n_size * m_size;
int t_size = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i{tid}; i < n_samples; i += t_size) {
int m_ind = i % m_size;
scalar_t val;
if (m_ind == 0) {
val = tmp[i];
} else {
val = tmp[i] - tmp[i - 1];
}
int ind = index[i];
output[ind] = val;
}
}
template<typename scalar_t>
__global__ void reorder_errs(const scalar_t *errs,
scalar_t *tmp, const int *index,
const int n_size, const int m_size) {
int n_samples = n_size * m_size;
int t_size = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i{tid}; i < n_samples; i += t_size) {
tmp[index[i]] = errs[i];
}
}
template<typename scalar_t>
__global__ void reorder_copy_back(scalar_t *errs, const scalar_t *tmp,
const int n_size, const int m_size) {
int n_samples = n_size * m_size;
int t_size = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i{tid}; i < n_samples; i += t_size) {
errs[i] = tmp[i];
}
}
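// mul_reduce_sum_by_row_per_block: per-class partial dot product of errs and
// the Lovasz weights in jacc. Negative errs (ignored pixels) are clamped to 0.
// Each (blockIdx.y, blockIdx.x) pair reduces a chunk of one class row in shared
// memory and writes one partial sum into buf; reduce_sum_by_row finishes the sum.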
template<typename scalar_t>
__global__ void mul_reduce_sum_by_row_per_block(scalar_t *errs,
const scalar_t *jacc, scalar_t *buf,
const int n_size, const int m_size) {
const scalar_t zero(0);
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
int bid = blockIdx.y;
int b_size = gridDim.y;
int tstride = blockDim.x * gridDim.x;
for (int i{bid}; i < n_size; i += b_size) {
shared[threadIdx.x] = zero;
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int j{tid}; j < m_size; j += tstride) {
int ind = m_size * i + j;
scalar_t err_val = errs[ind];
if (err_val < zero) err_val = zero; // bypass ignore index
shared[threadIdx.x] += err_val * jacc[ind];
}
__syncthreads();
reduce_op<sum_op, scalar_t>(shared, blockDim.x, threadIdx.x, sum_op<scalar_t>());
if (threadIdx.x == 0) {
int ind = i * gridDim.x + blockIdx.x;
buf[ind] = shared[0];
}
}
}
template<typename scalar_t>
__global__ void reduce_sum_by_row(const scalar_t *buf, scalar_t *loss ,
const int n_size, const int m_size) {
const scalar_t zero(0);
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
int bid = blockIdx.y;
int bstrd = gridDim.y;
for (int i{bid}; i < n_size; i += bstrd) {
shared[threadIdx.x] = zero;
__syncthreads();
int tid = threadIdx.x;
int tstrd = blockDim.x;
for (int j{tid}; j < m_size; j += tstrd) {
int ind = m_size * i + j;
shared[threadIdx.x] += buf[ind];
}
__syncthreads();
reduce_op<sum_op, scalar_t>(shared, blockDim.x, threadIdx.x, sum_op<scalar_t>());
if (threadIdx.x == 0) {
loss[i] = shared[0];
}
}
}
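// compute_probs_grad_and_transpose: gradient of the loss w.r.t. the softmax
// probabilities. Since errs = |one_hot - p|, the derivative w.r.t. p is -1 for
// the label class and +1 otherwise, so the gradient is +/- jacc * grad[class].
// The result is also moved from the (dim, n*m) layout back to (n, dim, m).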
template<typename scalar_t>
__global__ void compute_probs_grad_and_transpose(const scalar_t *jacc,
const scalar_t *grad, scalar_t *grad_logits,
const int64_t *labels, const int n_size,
const int dimsize, const int m_size) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
const int samplesize = n_size * dimsize * m_size;
const int dm_size = dimsize * m_size;
// read to shared memory to save bandwidth
if (threadIdx.x < dimsize) {
shared[threadIdx.x] = grad[threadIdx.x];
}
__syncthreads();
int e_ind;
for (int i{tid}; i < samplesize; i += stride) {
int n_ind = i / dm_size;
int d_ind = i % dm_size;
int m_ind = d_ind % m_size;
d_ind = d_ind / m_size;
e_ind = n_ind * m_size + m_ind;
int lb = static_cast<int>(labels[e_ind]);
int t_ind = d_ind * n_size * m_size + n_ind * m_size + m_ind; // index into the (dim, n, m) layout
// grad = -1 if j == lb else 1
if (lb == d_ind) {
grad_logits[i] = - jacc[t_ind] * shared[d_ind];
} else {
grad_logits[i] = jacc[t_ind] * shared[d_ind];
}
}
}
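// compute_softmax_shallow: numerically stable softmax over the class dimension,
// one thread per spatial position; used for small class counts (dimsize < 32).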
template<typename scalar_t>
__global__ void compute_softmax_shallow(const int n_size, const int dimsize,
const int m_size, const scalar_t *logits,
scalar_t *softmax) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int n_samples = m_size * n_size;
const scalar_t one(1.);
for (int i{tid}; i < n_samples; i+=stride) {
int n_idx = i / m_size;
int m_idx = i % m_size;
int e_idx;
// find max val
scalar_t max_val(-10000.);
for (int j{0}; j < dimsize; ++j) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[e_idx];
if (val > max_val) max_val = val;
}
// compute exp sum
scalar_t exp_sum_val(0.);
for (int j{0}; j < dimsize; ++j) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[e_idx];
exp_sum_val += math_ops::Exp(val - max_val);
}
exp_sum_val = one / exp_sum_val;
// compute softmax
for (int j{0}; j < dimsize; ++j) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[e_idx];
softmax[e_idx] = math_ops::Exp(val - max_val) * exp_sum_val;
}
}
}
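// compute_softmax_deep: same softmax, but a column of blockDim.y threads
// cooperates on each spatial position, using shared-memory reductions for the
// max and the exp-sum; used when dimsize >= 32.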
template<typename scalar_t>
__global__ void compute_softmax_deep(const int n_size, const int dimsize,
const int m_size, const scalar_t *logits,
scalar_t *softmax) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
shared += blockDim.y * threadIdx.x;
const int samplesize = n_size * m_size;
const scalar_t one(1.);
int tid = threadIdx.y;
int sid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{sid}; i < samplesize; i += stride) {
int e_idx;
int n_idx = i / m_size;
int m_idx = i % m_size;
// find max val
shared[tid] = scalar_t(-10000.);
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[e_idx];
if (val > shared[tid]) shared[tid] = val;
}
__syncthreads();
reduce_op<gt_op, scalar_t>(shared, blockDim.y, threadIdx.y, gt_op<scalar_t>());
scalar_t max_val = shared[0];
__syncthreads();
// find exp sum val
shared[tid] = scalar_t(0.);
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
shared[tid] += math_ops::Exp(logits[e_idx] - max_val);
}
__syncthreads();
reduce_op<sum_op, scalar_t>(shared, blockDim.y, threadIdx.y, sum_op<scalar_t>());
if (tid == 0) shared[0] = one / shared[0];
__syncthreads();
// compute softmax
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_idx = n_idx * dimsize * m_size + j * m_size + m_idx;
softmax[e_idx] = math_ops::Exp(logits[e_idx] - max_val) * shared[0];
}
}
}
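// compute_logits_grad_shallow: backprop through the softmax. With p = softmax
// (stored in jacc) and g = grad w.r.t. probs (stored in grad_logits), this
// computes dL/dz_j = p_j * (g_j - sum_k p_k * g_k), one thread per position.
// Gradients of ignored pixels are zeroed.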
template<typename scalar_t>
__global__ void compute_logits_grad_shallow(const int n_size, const int dimsize,
const int m_size, const int ignore_index,
const scalar_t *jacc, scalar_t *grad_logits,
const int64_t *labels) {
const scalar_t zero(0.);
const int samplesize = n_size * m_size;
int sid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
// compute grad of logits, store in grad_logits, jacc is softmax
for (int i{sid}; i < samplesize; i += stride) {
int n_ind = i / m_size;
int m_ind = i % m_size;
int e_ind;
// set grad of ignored index to be 0
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
for (int j{0}; j < dimsize; ++j) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
grad_logits[e_ind] = zero;
}
continue;
}
scalar_t sum(0);
for (int j{0}; j < dimsize; ++j) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
sum -= jacc[e_ind] * grad_logits[e_ind];
}
for (int j{0}; j < dimsize; ++j) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
grad_logits[e_ind] = jacc[e_ind] * (sum + grad_logits[e_ind]);
}
}
}
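// compute_logits_grad_deep: same softmax backward as above, but a column of
// blockDim.y threads handles each position, with a shared-memory reduction
// for sum_k p_k * g_k.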
template<typename scalar_t>
__global__ void compute_logits_grad_deep(const int n_size, const int dimsize,
const int m_size, const int ignore_index,
const scalar_t *jacc, scalar_t *grad_logits,
const int64_t *labels) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *shared = reinterpret_cast<scalar_t*>(sdata_raw);
const scalar_t zero(0.);
const int samplesize = n_size * m_size;
const int shm_offset = blockDim.y * threadIdx.x;
shared += shm_offset;
int tid = threadIdx.y;
int sid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
// compute grad of logits, store in grad_logits, jacc is softmax
for (int i{sid}; i < samplesize; i += stride) {
int n_ind = i / m_size;
int m_ind = i % m_size;
int e_ind;
// set grad of ignored index to be 0
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
grad_logits[e_ind] = zero;
}
continue;
}
shared[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
shared[tid] -= jacc[e_ind] * grad_logits[e_ind];
}
__syncthreads();
reduce_op<sum_op, scalar_t>(shared, blockDim.y, threadIdx.y, sum_op<scalar_t>());
for (int j{tid}; j < dimsize; j += blockDim.y) {
e_ind = n_ind * dimsize * m_size + j * m_size + m_ind;
grad_logits[e_ind] = jacc[e_ind] * (grad_logits[e_ind] + shared[0]);
}
__syncthreads();
}
}
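// transpose_softmax: copy the softmax from the (n, dim, m) layout in from to
// the (dim, n*m) layout in to, zeroing from so it can be reused as the
// one-hot buffer.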
template<typename scalar_t>
__global__ void transpose_softmax(const int n_size, const int dimsize,
const int m_size, scalar_t *from, scalar_t *to) {
const int samplesize = n_size * dimsize * m_size;
const int dm_size = dimsize * m_size;
const scalar_t zero(0.);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < samplesize; i += stride) {
int n_ind = i / dm_size;
int d_ind = i % dm_size;
int m_ind = d_ind % m_size;
d_ind = d_ind / m_size;
int e_ind = d_ind * n_size * m_size + n_ind * m_size + m_ind;
to[e_ind] = from[i];
from[i] = zero;
}
}
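// LovaszComputeErrsOneHot: host-side driver for the forward setup. Computes the
// softmax (shallow or deep variant depending on dimsize), transposes it into
// errs, then builds the one-hot labels into jacc and the error matrix
// errs = |one_hot - softmax|.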
void LovaszComputeErrsOneHot(const at::Tensor &logits, const at::Tensor &labels,
at::Tensor &errs, at::Tensor &jacc,
const int ignore_index) {
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
int blockx, blocky, gridx;
dim3 block, grid;
if (dimsize < 32) {
block = dim3(BLOCKSIZE);
grid = dim3(std::max(1, std::min(samplesize / BLOCKSIZE, 4096)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "lovasz forward softmax", [&] {
compute_softmax_shallow<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // store softmax
);
});
} else {
blocky = 32;
while (blocky < dimsize) blocky <<= 1;
blocky >>= 1;
blocky = std::min(std::max(1, blocky), BLOCKSIZE);
blockx = BLOCKSIZE / blocky;
gridx = std::min(4096, std::max(1, samplesize / blockx));
block = dim3(blockx, blocky);
grid = dim3(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "lovasz forward softmax", [&] {
int shm_size = sizeof(scalar_t) * BLOCKSIZE;
compute_softmax_deep<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // store softmax
);
});
}
block = dim3(BLOCKSIZE);
grid = dim3(std::max(1, std::min(samplesize * dimsize / BLOCKSIZE, 4096)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "lovasz transpose softmax", [&] {
transpose_softmax<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
jacc.contiguous().data_ptr<scalar_t>(), // set jacc to all 0
errs.contiguous().data_ptr<scalar_t>());
});
grid = dim3(std::max(1, std::min(samplesize / BLOCKSIZE, 4096)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "lovasz forward errs and one hot", [&] {
compute_errs<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
dimsize, samplesize, ignore_index,
labels.contiguous().data_ptr<int64_t>(),
errs.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // jacc is one hot here
);
});
}
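// LovaszComputeJacc: sort each class row of errs in descending order (carrying
// the one-hot row in output along via a zip iterator), cumsum the sorted
// one-hot, derive the Lovasz gradient weights, and restore errs to its
// original ordering so the weights and errors stay aligned for the loss and
// the backward pass.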
void LovaszComputeJacc(at::Tensor &errs, at::Tensor &output) {
int n_samples = errs.size(1);
int dimsize = errs.size(0);
auto tmp = at::empty_like(errs);
dim3 block(BLOCKSIZE);
dim3 grid(max(min((int)tmp.numel() / BLOCKSIZE, 4096), 1));
// sort errs, together with one hot and obtain the order index
thrust::device_vector<int> index(n_samples * dimsize);
thrust::sequence(thrust::device, index.begin(), index.end(), 0, 1);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "jacc sort", [&] {
thrust::device_ptr<scalar_t> errs_ptr(errs.data_ptr<scalar_t>());
thrust::device_ptr<scalar_t> output_ptr(output.data_ptr<scalar_t>());
auto begin = thrust::make_zip_iterator(thrust::make_tuple(
index.begin(), errs_ptr, output_ptr));
thrust::sort(
thrust::device, begin, begin + errs.numel(),
CompareSegmentGT<int, scalar_t>(n_samples));
});
// cumsum
cumsum_2d_by_row_v2(output);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "jacc forward steps", [&] {
// compute iou, store in temp memory of tmp, n_pos is the last column of cumsum
int shm = sizeof(scalar_t) * BLOCKSIZE;
compute_jacc_iou<scalar_t><<<grid, block, shm, at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
tmp.data_ptr<scalar_t>(),
dimsize, n_samples);
// compute iou difference from tmp and store at output, then copy errs to tmp
// to prepare for re-order of errs
compute_jacc_diff<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
errs.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
tmp.data_ptr<scalar_t>(),
thrust::raw_pointer_cast(&index[0]),
dimsize, n_samples);
// re-order errs and copy to tmp
reorder_errs<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
errs.data_ptr<scalar_t>(),
tmp.data_ptr<scalar_t>(),
thrust::raw_pointer_cast(&index[0]),
dimsize, n_samples);
// copy back from tmp to errs
reorder_copy_back<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
errs.data_ptr<scalar_t>(),
tmp.data_ptr<scalar_t>(),
dimsize, n_samples);
});
}
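// LovaszComputeLoss: per-class loss = dot product of errs and the Lovasz
// weights across all positions, computed as a two-stage block reduction
// (partial sums per block, then a final reduction per class row).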
void LovaszComputeLoss(const at::Tensor &errs, const at::Tensor &jacc, const at::Tensor &loss) {
const int n_size = errs.size(0);
const int m_size = errs.size(1);
// parallel strategy
int gridy = 2;
while (gridy < n_size && gridy <= 32) gridy <<= 1;
gridy >>= 1;
gridy = std::max(1, gridy); // limit the parallel number of rows within 1 and 32
int gridx = std::max(std::min(m_size / BLOCKSIZE, 4096 / gridy), 1);
dim3 block(BLOCKSIZE);
dim3 grid(gridx, gridy);
// allocate memory and cuda grid/block
auto buf = at::empty({n_size, gridx}, errs.options());
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "compute loss per block", [&] {
// multiply and reduce within each kernel
int shm = sizeof(scalar_t) * BLOCKSIZE;
mul_reduce_sum_by_row_per_block<scalar_t><<<grid, block, shm, at::cuda::getCurrentCUDAStream()>>>(
errs.data_ptr<scalar_t>(),
jacc.data_ptr<scalar_t>(),
buf.data_ptr<scalar_t>(),
n_size, m_size);
});
int blockx = 2;
while (blockx < gridx) blockx <<= 1;
if (blockx > BLOCKSIZE) blockx = BLOCKSIZE;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(errs.scalar_type(), "compute loss reduce block", [&] {
// reduce sum among blocks
int shm = sizeof(scalar_t) * blockx;
reduce_sum_by_row<scalar_t><<<dim3(1, gridy), dim3(blockx), shm, at::cuda::getCurrentCUDAStream()>>>(
buf.data_ptr<scalar_t>(),
loss.data_ptr<scalar_t>(),
n_size, gridx);
});
}
/* Method */
std::tuple<at::Tensor, at::Tensor> Lovasz_softmax_forward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
AT_ASSERTM(logits.numel() < (1L << 31), "input tensor too large, int32 type will overflow");
AT_ASSERTM(logits.size(1) < BLOCKSIZE, "num of classes should be less than BLOCKSIZE");
// allocate memory and cuda grid/block
const int dimsize = logits.size(1);
auto errs = at::empty_like(logits).reshape({dimsize, -1});
auto jacc = at::empty_like(logits).reshape({dimsize, -1});
auto loss = at::empty({dimsize}, logits.options());
if (errs.numel() == 0 || jacc.numel() == 0 || loss.numel() == 0) {
THCudaCheck(cudaGetLastError());
return std::make_tuple(errs, jacc);
}
// Compute errs and one hot
LovaszComputeErrsOneHot(logits, labels, errs, jacc, ignore_index);
// compute jacc index, which is re-ordered to the original order
// so that we can re-use it in the backward pass
LovaszComputeJacc(errs, jacc);
// reduce sum operation
LovaszComputeLoss(errs, jacc, loss);
THCudaCheck(cudaGetLastError());
return std::make_tuple(loss, jacc);
}
at::Tensor Lovasz_softmax_backward_cuda(const at::Tensor &grad, const at::Tensor &logits,
const at::Tensor &labels, const at::Tensor jacc,
const int64_t ignore_index) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
// allocate memory and cuda grid/block
auto grad_logits = at::empty_like(logits);
// call kernel
int blockx, blocky, gridx;
dim3 block, grid;
gridx = std::max(1, std::min(samplesize * dimsize / BLOCKSIZE, 4096));
block = dim3(BLOCKSIZE);
grid = dim3(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward probs", [&] {
// compute grad of probs: multiply the incoming grad by the jacc weights
// store at grad_logits and change from dnm to ndm layout
int shm = BLOCKSIZE * sizeof(scalar_t);
compute_probs_grad_and_transpose<scalar_t><<<grid, block, shm, at::cuda::getCurrentCUDAStream()>>>(
jacc.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
n_size, dimsize, m_size);
});
// from now on, grad_probs is stored in grad_logits, softmax is on jacc
// compute grad of logits, store it in grad_logits
if (dimsize < 32) {
block = dim3(BLOCKSIZE);
grid = dim3(std::max(1, std::min(samplesize / BLOCKSIZE, 4096)));
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward logits", [&] {
compute_softmax_shallow<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // store softmax
);
compute_logits_grad_shallow<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size, ignore_index,
jacc.contiguous().data_ptr<scalar_t>(),
grad_logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>()
);
});
} else {
blocky = 32;
while (blocky < dimsize) blocky <<= 1;
blocky >>= 1;
blocky = std::min(std::max(1, blocky), BLOCKSIZE);
blockx = BLOCKSIZE / blocky;
gridx = std::min(4096, std::max(1, samplesize / blockx));
block = dim3(blockx, blocky);
grid = dim3(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "lovasz backward logits", [&] {
int shm_size = sizeof(scalar_t) * BLOCKSIZE;
compute_softmax_deep<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
jacc.contiguous().data_ptr<scalar_t>() // store softmax
);
compute_logits_grad_deep<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size, ignore_index,
jacc.contiguous().data_ptr<scalar_t>(),
grad_logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>()
);
});
}
THCudaCheck(cudaGetLastError());
return grad_logits;
}
// python interface
std::tuple<at::Tensor, at::Tensor> Lovasz_softmax_forward(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index) {
if (logits.device().type() != c10::kCUDA) {
AT_ERROR("this lovasz softmax function only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return Lovasz_softmax_forward_cuda(logits, labels, ignore_index);
}
at::Tensor Lovasz_softmax_backward(const at::Tensor &grad, const at::Tensor &logits,
const at::Tensor &labels, at::Tensor jacc,
const int64_t ignore_index) {
// TODO: try AT_ASSERTM
if (logits.device().type() != c10::kCUDA || labels.device().type() != c10::kCUDA) {
AT_ERROR("this lovasz softmax function only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return Lovasz_softmax_backward_cuda(grad, logits, labels, jacc, ignore_index);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("lovasz_softmax_forward", &Lovasz_softmax_forward, "lovasz softmax forward");
m.def("lovasz_softmax_backward", &Lovasz_softmax_backward, "lovasz softmax backward");
}
|
4dc03f9c9e75517e718b79bbf66111be5be8eeea.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "hyperbolic_tangent_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "../hyperbolic_tangent_layer.h"
#include "../nn_types.h"
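// Computes scale * tanh(steepness * x): with y = exp(2 * steepness * x),
// tanh(steepness * x) = (y - 1) / (y + 1); fast-math intrinsics are used.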
static __forceinline__ __device__ float hyperbolic_tangent(
float x,
float hyperbolic_tangent_steepness2,
float hyperbolic_tangent_major_multiplier)
{
float y = __expf(x * hyperbolic_tangent_steepness2);
return __fdividef(y - 1.0F, y + 1.0F) * hyperbolic_tangent_major_multiplier;
}
__global__ void hyperbolic_tangent_kernel(
float4 * __restrict input,
float hyperbolic_tangent_steepness2,
float hyperbolic_tangent_major_multiplier,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = hyperbolic_tangent(val.x, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier);
val.y = hyperbolic_tangent(val.y, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier);
val.z = hyperbolic_tangent(val.z, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier);
val.w = hyperbolic_tangent(val.w, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier);
input[elem_id] = val;
}
}
namespace nnforge
{
namespace cuda
{
hyperbolic_tangent_layer_tester_cuda::hyperbolic_tangent_layer_tester_cuda()
{
}
hyperbolic_tangent_layer_tester_cuda::~hyperbolic_tangent_layer_tester_cuda()
{
}
void hyperbolic_tangent_layer_tester_cuda::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hipLaunchKernelGGL(( hyperbolic_tangent_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_buffer,
hyperbolic_tangent_steepness2,
hyperbolic_tangent_major_multiplier,
elem_count);
}
void hyperbolic_tangent_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const hyperbolic_tangent_layer> layer_derived = nnforge_dynamic_pointer_cast<const hyperbolic_tangent_layer>(layer_schema);
hyperbolic_tangent_steepness2 = layer_derived->steepness * 2.0F;
hyperbolic_tangent_major_multiplier = layer_derived->scale;
}
}
}
|
4dc03f9c9e75517e718b79bbf66111be5be8eeea.cu
|
/*
* Copyright 2011-2015 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "hyperbolic_tangent_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "../hyperbolic_tangent_layer.h"
#include "../nn_types.h"
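// Computes scale * tanh(steepness * x): with y = exp(2 * steepness * x),
// tanh(steepness * x) = (y - 1) / (y + 1); fast-math intrinsics are used.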
static __forceinline__ __device__ float hyperbolic_tangent(
float x,
float hyperbolic_tangent_steepness2,
float hyperbolic_tangent_major_multiplier)
{
float y = __expf(x * hyperbolic_tangent_steepness2);
return __fdividef(y - 1.0F, y + 1.0F) * hyperbolic_tangent_major_multiplier;
}
__global__ void hyperbolic_tangent_kernel(
float4 * __restrict input,
float hyperbolic_tangent_steepness2,
float hyperbolic_tangent_major_multiplier,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
val.x = hyperbolic_tangent(val.x, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier);
val.y = hyperbolic_tangent(val.y, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier);
val.z = hyperbolic_tangent(val.z, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier);
val.w = hyperbolic_tangent(val.w, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier);
input[elem_id] = val;
}
}
namespace nnforge
{
namespace cuda
{
hyperbolic_tangent_layer_tester_cuda::hyperbolic_tangent_layer_tester_cuda()
{
}
hyperbolic_tangent_layer_tester_cuda::~hyperbolic_tangent_layer_tester_cuda()
{
}
void hyperbolic_tangent_layer_tester_cuda::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
elem_count);
hyperbolic_tangent_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_buffer,
hyperbolic_tangent_steepness2,
hyperbolic_tangent_major_multiplier,
elem_count);
}
void hyperbolic_tangent_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const hyperbolic_tangent_layer> layer_derived = nnforge_dynamic_pointer_cast<const hyperbolic_tangent_layer>(layer_schema);
hyperbolic_tangent_steepness2 = layer_derived->steepness * 2.0F;
hyperbolic_tangent_major_multiplier = layer_derived->scale;
}
}
}
|
4c65283cb86b0cb0935606f73f96f4d3ddd72198.hip
|
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*
* Copyright 2010-2011 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
******************************************************************************/
/******************************************************************************
* Tuning tool for establishing optimal copy granularity configuration types
******************************************************************************/
#include <stdio.h>
// Copy includes
#include <b40c/copy/policy.cuh>
#include <b40c/copy/enactor.cuh>
#include <b40c/util/arch_dispatch.cuh>
#include <b40c/util/cuda_properties.cuh>
#include <b40c/util/numeric_traits.cuh>
#include <b40c/util/parameter_generation.cuh>
// Test utils
#include "b40c_test_util.h"
using namespace b40c;
/******************************************************************************
* Defines, constants, globals, and utility types
******************************************************************************/
#ifndef TUNE_ARCH
#define TUNE_ARCH (200)
#endif
bool g_verbose;
int g_max_ctas = 0;
int g_iterations = 0;
bool g_verify;
/******************************************************************************
* Utility routines
******************************************************************************/
/**
* Displays the commandline usage for this tool
*/
void Usage()
{
printf("\ntune_copy [--device=<device index>] [--v] [--i=<num-iterations>] "
"[--max-ctas=<max-thread-blocks>] [--n=<num-words>]\n");
printf("\n");
printf("\t--v\tDisplays verbose configuration to the console.\n");
printf("\n");
printf("\t--verify\tChecks the result.\n");
printf("\n");
printf("\t--i\tPerforms the copy operation <num-iterations> times\n");
printf("\t\t\ton the device. Default = 1\n");
printf("\n");
printf("\t--n\tThe number of 32-bit words to comprise the sample problem\n");
printf("\n");
}
/**
* Enumerated tuning params
*/
enum TuningParam {
PARAM_BEGIN,
READ_MODIFIER,
WRITE_MODIFIER,
WORK_STEALING,
OVERSUBSCRIBED_GRID_SIZE,
LOG_THREADS,
LOG_LOAD_VEC_SIZE,
LOG_LOADS_PER_TILE,
PARAM_END,
// Parameters below here are currently not part of the tuning sweep
MAX_CTA_OCCUPANCY,
// Derive these from the others above
LOG_SCHEDULE_GRANULARITY,
};
/**
* Encapsulation structure for
* - Wrapping problem type and storage
* - Providing call-back for parameter-list generation
*/
template <typename T, typename SizeT>
class TuneEnactor : public copy::Enactor
{
public:
T *d_dest;
T *d_src;
T *h_data;
T *h_reference;
SizeT num_elements;
/**
* Ranges for the tuning params
*/
template <typename ParamList, int PARAM> struct Ranges;
// READ_MODIFIER
template <typename ParamList>
struct Ranges<ParamList, READ_MODIFIER> {
enum {
MIN = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::ld::NONE : util::io::ld::NONE + 1,
MAX = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::ld::NONE : util::io::ld::LIMIT - 1 // No type modifiers for pre-Fermi or non-builtin types
};
};
// WRITE_MODIFIER
template <typename ParamList>
struct Ranges<ParamList, WRITE_MODIFIER> {
enum {
MIN = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::st::NONE : util::io::st::NONE + 1,
MAX = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::st::NONE : util::io::st::LIMIT - 1 // No type modifiers for pre-Fermi or non-builtin types
};
};
// WORK_STEALING
template <typename ParamList>
struct Ranges<ParamList, WORK_STEALING> {
enum {
MIN = 0,
MAX = (TUNE_ARCH < 200) ? 0 : 1 // Only bother tuning atomic work-stealing on Fermi+
};
};
// OVERSUBSCRIBED_GRID_SIZE
template <typename ParamList>
struct Ranges<ParamList, OVERSUBSCRIBED_GRID_SIZE> {
enum {
MIN = 0,
MAX = !util::Access<ParamList, WORK_STEALING>::VALUE // Don't oversubscribe if we're workstealing
};
};
// LOG_THREADS
template <typename ParamList>
struct Ranges<ParamList, LOG_THREADS> {
enum {
MIN = B40C_LOG_WARP_THREADS(TUNE_ARCH),
MAX = B40C_LOG_CTA_THREADS(TUNE_ARCH)
};
};
// LOG_LOAD_VEC_SIZE
template <typename ParamList>
struct Ranges<ParamList, LOG_LOAD_VEC_SIZE> {
enum {
MIN = 0,
MAX = 2
};
};
// LOG_LOADS_PER_TILE
template <typename ParamList>
struct Ranges<ParamList, LOG_LOADS_PER_TILE> {
enum {
MIN = 0,
MAX = 2
};
};
/**
* Constructor
*/
TuneEnactor(SizeT num_elements) :
copy::Enactor(),
d_dest(NULL),
d_src(NULL),
h_data(NULL),
h_reference(NULL),
num_elements(num_elements) {}
/**
* Timed scan for applying a specific granularity configuration type
*/
template <typename Policy, int EST_REGS_OCCUPANCY>
struct TimedCopy
{
template <typename Enactor>
static void Invoke(Enactor *enactor)
{
Policy::Print();
fflush(stdout);
// Perform a single iteration to allocate any memory if needed, prime code caches, etc.
enactor->ENACTOR_DEBUG = g_verbose;
if (enactor->template Copy<Policy>(
enactor->d_dest,
enactor->d_src,
enactor->num_elements,
g_max_ctas))
{
exit(1);
}
enactor->ENACTOR_DEBUG = false;
// Perform the timed number of iterations
GpuTimer timer;
for (int i = 0; i < g_iterations; i++) {
// Start cuda timing record
timer.Start();
// Call the scan API routine
if (enactor->template Copy<Policy>(
enactor->d_dest,
enactor->d_src,
enactor->num_elements,
g_max_ctas))
{
exit(1);
}
// End cuda timing record
timer.Stop();
// Flushes any stdio from the GPU
if (util::B40CPerror(hipDeviceSynchronize(), "TimedCopy hipDeviceSynchronize failed: ", __FILE__, __LINE__)) {
exit(1);
}
}
// Display timing information
double avg_runtime = double(timer.ElapsedMillis()) / g_iterations;
double throughput = 0.0;
if (avg_runtime > 0.0) throughput = ((double) enactor->num_elements) / avg_runtime / 1000.0 / 1000.0;
printf(", %f, %f, %f, ",
avg_runtime, throughput, throughput * sizeof(T) * 2);
fflush(stdout);
if (g_verify) {
// Copy out data
if (util::B40CPerror(hipMemcpy(
enactor->h_data,
enactor->d_dest, sizeof(T) *
enactor->num_elements, hipMemcpyDeviceToHost),
"TimedCopy hipMemcpy d_dest failed: ", __FILE__, __LINE__)) exit(1);
// Verify solution
CompareResults<T>(
enactor->h_data,
enactor->h_reference,
enactor->num_elements, true);
}
printf("\n");
fflush(stdout);
}
};
template <typename Policy>
struct TimedCopy<Policy, 0>
{
template <typename Enactor>
static void Invoke(Enactor *enactor) {}
};
/**
* Callback invoked by parameter-list generation
*/
template <typename ParamList>
void Invoke()
{
const int C_READ_MODIFIER =
util::Access<ParamList, READ_MODIFIER>::VALUE;
const int C_WRITE_MODIFIER =
util::Access<ParamList, WRITE_MODIFIER>::VALUE;
const int C_WORK_STEALING =
util::Access<ParamList, WORK_STEALING>::VALUE;
const int C_OVERSUBSCRIBED_GRID_SIZE =
util::Access<ParamList, OVERSUBSCRIBED_GRID_SIZE>::VALUE;
const int C_LOG_THREADS =
util::Access<ParamList, LOG_THREADS>::VALUE;
const int C_LOG_LOAD_VEC_SIZE =
util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE;
const int C_LOG_LOADS_PER_TILE =
util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE;
const int C_MIN_CTA_OCCUPANCY = 1;
const int C_LOG_SCHEDULE_GRANULARITY =
C_LOG_LOADS_PER_TILE +
C_LOG_LOAD_VEC_SIZE +
C_LOG_THREADS;
// Establish the granularity configuration type
typedef copy::Policy <
T,
SizeT,
TUNE_ARCH,
C_LOG_SCHEDULE_GRANULARITY,
C_MIN_CTA_OCCUPANCY,
C_LOG_THREADS,
C_LOG_LOAD_VEC_SIZE,
C_LOG_LOADS_PER_TILE,
(util::io::ld::CacheModifier) C_READ_MODIFIER,
(util::io::st::CacheModifier) C_WRITE_MODIFIER,
C_WORK_STEALING,
C_OVERSUBSCRIBED_GRID_SIZE> Policy;
const int REG_MULTIPLIER = (sizeof(T) + 3) / 4;
const int TILE_ELEMENTS_PER_THREAD = 1 << (C_LOG_THREADS + C_LOG_LOAD_VEC_SIZE + C_LOG_LOADS_PER_TILE);
const int REGS_ESTIMATE = (REG_MULTIPLIER * TILE_ELEMENTS_PER_THREAD) + 2;
const int EST_REGS_OCCUPANCY = B40C_SM_REGISTERS(TUNE_ARCH) / REGS_ESTIMATE;
// Invoke this config
TimedCopy<Policy, EST_REGS_OCCUPANCY>::Invoke(this);
}
};
/**
* Creates an example scan problem and then dispatches the problem
* to the GPU for the given number of iterations, displaying runtime information.
*/
template<typename T, typename SizeT>
void TestCopy(SizeT num_elements)
{
// Allocate storage and enactor
typedef TuneEnactor<T, SizeT> Detail;
Detail detail(num_elements);
if (util::B40CPerror(hipMalloc((void**) &detail.d_src, sizeof(T) * num_elements),
"TimedCopy hipMalloc d_src failed: ", __FILE__, __LINE__)) exit(1);
if (util::B40CPerror(hipMalloc((void**) &detail.d_dest, sizeof(T) * num_elements),
"TimedCopy hipMalloc d_dest failed: ", __FILE__, __LINE__)) exit(1);
if ((detail.h_data = (T*) malloc(sizeof(T) * num_elements)) == NULL) {
fprintf(stderr, "Host malloc of problem data failed\n");
exit(1);
}
if ((detail.h_reference = (T*) malloc(sizeof(T) * num_elements)) == NULL) {
fprintf(stderr, "Host malloc of problem data failed\n");
exit(1);
}
for (SizeT i = 0; i < num_elements; ++i) {
// util::RandomBits<T>(detail.h_data[i], 0);
detail.h_data[i] = i;
detail.h_reference[i] = detail.h_data[i];
}
// Move a fresh copy of the problem into device storage
if (util::B40CPerror(hipMemcpy(detail.d_src, detail.h_data, sizeof(T) * num_elements, hipMemcpyHostToDevice),
"TimedCopy hipMemcpy d_src failed: ", __FILE__, __LINE__)) exit(1);
// Run the timing tests
util::ParamListSweep<
Detail,
PARAM_BEGIN + 1,
PARAM_END,
Detail::template Ranges>::template Invoke<util::EmptyTuple>(detail);
// Free allocated memory
if (detail.d_src) hipFree(detail.d_src);
if (detail.d_dest) hipFree(detail.d_dest);
// Free our allocated host memory
if (detail.h_data) free(detail.h_data);
if (detail.h_reference) free(detail.h_reference);
}
/******************************************************************************
* Main
******************************************************************************/
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
DeviceInit(args);
//srand(time(NULL));
srand(0); // presently deterministic
int num_elements = 1024;
// Check command line arguments
if (args.CheckCmdLineFlag("help")) {
Usage();
return 0;
}
args.GetCmdLineArgument("i", g_iterations);
args.GetCmdLineArgument("n", num_elements);
args.GetCmdLineArgument("max-ctas", g_max_ctas);
g_verify = args.CheckCmdLineFlag("verify");
g_verbose = args.CheckCmdLineFlag("v");
util::CudaProperties cuda_props;
printf("Test Copy: %d iterations, %lu 32-bit words (%lu bytes)", g_iterations, (unsigned long) num_elements, (unsigned long) num_elements * 4);
printf("\nCodeGen: \t[device_sm_version: %d, kernel_ptx_version: %d]\n\n",
cuda_props.device_sm_version, cuda_props.kernel_ptx_version);
printf("sizeof(T), sizeof(SizeT), CUDA_ARCH, "
"LOG_SCHEDULE_GRANULARITY, MIN_CTA_OCCUPANCY, LOG_THREADS, LOG_LOAD_VEC_SIZE, LOG_LOADS_PER_TILE, "
"READ_MODIFIER, WRITE_MODIFIER, WORK_STEALING, OVERSUBSCRIBED_GRID_SIZE, "
"elapsed time (ms), throughput (10^9 items/s), bandwidth (10^9 B/s), Correctness\n");
// Execute test(s)
#if (TUNE_SIZE == 0) || (TUNE_SIZE == 1)
{
typedef unsigned char T;
TestCopy<T>(num_elements * 4);
}
#endif
#if (TUNE_SIZE == 0) || (TUNE_SIZE == 2)
{
typedef unsigned short T;
TestCopy<T>(num_elements * 2);
}
#endif
#if (TUNE_SIZE == 0) || (TUNE_SIZE == 4)
{
typedef unsigned int T;
TestCopy<T>(num_elements);
}
#endif
#if (TUNE_SIZE == 0) || (TUNE_SIZE == 8)
{
typedef unsigned long long T;
TestCopy<T>(num_elements / 2);
}
#endif
return 0;
}
|
4c65283cb86b0cb0935606f73f96f4d3ddd72198.cu
|
/******************************************************************************
*
* Copyright 2010-2011 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
******************************************************************************/
/******************************************************************************
* Tuning tool for establishing optimal copy granularity configuration types
******************************************************************************/
#include <stdio.h>
// Copy includes
#include <b40c/copy/policy.cuh>
#include <b40c/copy/enactor.cuh>
#include <b40c/util/arch_dispatch.cuh>
#include <b40c/util/cuda_properties.cuh>
#include <b40c/util/numeric_traits.cuh>
#include <b40c/util/parameter_generation.cuh>
// Test utils
#include "b40c_test_util.h"
using namespace b40c;
/******************************************************************************
* Defines, constants, globals, and utility types
******************************************************************************/
#ifndef TUNE_ARCH
#define TUNE_ARCH (200)
#endif
bool g_verbose;
int g_max_ctas = 0;
int g_iterations = 0;
bool g_verify;
/******************************************************************************
* Utility routines
******************************************************************************/
/**
* Displays the commandline usage for this tool
*/
void Usage()
{
printf("\ntune_copy [--device=<device index>] [--v] [--i=<num-iterations>] "
"[--max-ctas=<max-thread-blocks>] [--n=<num-words>]\n");
printf("\n");
printf("\t--v\tDisplays verbose configuration to the console.\n");
printf("\n");
printf("\t--verify\tChecks the result.\n");
printf("\n");
printf("\t--i\tPerforms the copy operation <num-iterations> times\n");
printf("\t\t\ton the device. Default = 1\n");
printf("\n");
printf("\t--n\tThe number of 32-bit words to comprise the sample problem\n");
printf("\n");
}
/**
* Enumerated tuning params
*/
enum TuningParam {
PARAM_BEGIN,
READ_MODIFIER,
WRITE_MODIFIER,
WORK_STEALING,
OVERSUBSCRIBED_GRID_SIZE,
LOG_THREADS,
LOG_LOAD_VEC_SIZE,
LOG_LOADS_PER_TILE,
PARAM_END,
// Parameters below here are currently not part of the tuning sweep
MAX_CTA_OCCUPANCY,
// Derive these from the others above
LOG_SCHEDULE_GRANULARITY,
};
/**
* Encapsulation structure for
* - Wrapping problem type and storage
* - Providing call-back for parameter-list generation
*/
template <typename T, typename SizeT>
class TuneEnactor : public copy::Enactor
{
public:
T *d_dest;
T *d_src;
T *h_data;
T *h_reference;
SizeT num_elements;
/**
* Ranges for the tuning params
*/
template <typename ParamList, int PARAM> struct Ranges;
// READ_MODIFIER
template <typename ParamList>
struct Ranges<ParamList, READ_MODIFIER> {
enum {
MIN = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::ld::NONE : util::io::ld::NONE + 1,
MAX = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::ld::NONE : util::io::ld::LIMIT - 1 // No type modifiers for pre-Fermi or non-builtin types
};
};
// WRITE_MODIFIER
template <typename ParamList>
struct Ranges<ParamList, WRITE_MODIFIER> {
enum {
MIN = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::st::NONE : util::io::st::NONE + 1,
MAX = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::st::NONE : util::io::st::LIMIT - 1 // No type modifiers for pre-Fermi or non-builtin types
};
};
// WORK_STEALING
template <typename ParamList>
struct Ranges<ParamList, WORK_STEALING> {
enum {
MIN = 0,
MAX = (TUNE_ARCH < 200) ? 0 : 1 // Only bother tuning atomic work-stealing on Fermi+
};
};
// OVERSUBSCRIBED_GRID_SIZE
template <typename ParamList>
struct Ranges<ParamList, OVERSUBSCRIBED_GRID_SIZE> {
enum {
MIN = 0,
MAX = !util::Access<ParamList, WORK_STEALING>::VALUE // Don't oversubscribe if we're workstealing
};
};
// LOG_THREADS
template <typename ParamList>
struct Ranges<ParamList, LOG_THREADS> {
enum {
MIN = B40C_LOG_WARP_THREADS(TUNE_ARCH),
MAX = B40C_LOG_CTA_THREADS(TUNE_ARCH)
};
};
// LOG_LOAD_VEC_SIZE
template <typename ParamList>
struct Ranges<ParamList, LOG_LOAD_VEC_SIZE> {
enum {
MIN = 0,
MAX = 2
};
};
// LOG_LOADS_PER_TILE
template <typename ParamList>
struct Ranges<ParamList, LOG_LOADS_PER_TILE> {
enum {
MIN = 0,
MAX = 2
};
};
/**
* Constructor
*/
TuneEnactor(SizeT num_elements) :
copy::Enactor(),
d_dest(NULL),
d_src(NULL),
h_data(NULL),
h_reference(NULL),
num_elements(num_elements) {}
/**
* Timed scan for applying a specific granularity configuration type
*/
template <typename Policy, int EST_REGS_OCCUPANCY>
struct TimedCopy
{
template <typename Enactor>
static void Invoke(Enactor *enactor)
{
Policy::Print();
fflush(stdout);
// Perform a single iteration to allocate any memory if needed, prime code caches, etc.
enactor->ENACTOR_DEBUG = g_verbose;
if (enactor->template Copy<Policy>(
enactor->d_dest,
enactor->d_src,
enactor->num_elements,
g_max_ctas))
{
exit(1);
}
enactor->ENACTOR_DEBUG = false;
// Perform the timed number of iterations
GpuTimer timer;
for (int i = 0; i < g_iterations; i++) {
// Start cuda timing record
timer.Start();
// Call the scan API routine
if (enactor->template Copy<Policy>(
enactor->d_dest,
enactor->d_src,
enactor->num_elements,
g_max_ctas))
{
exit(1);
}
// End cuda timing record
timer.Stop();
// Flushes any stdio from the GPU
if (util::B40CPerror(cudaThreadSynchronize(), "TimedCopy cudaThreadSynchronize failed: ", __FILE__, __LINE__)) {
exit(1);
}
}
// Display timing information
double avg_runtime = double(timer.ElapsedMillis()) / g_iterations;
double throughput = 0.0;
if (avg_runtime > 0.0) throughput = ((double) enactor->num_elements) / avg_runtime / 1000.0 / 1000.0;
printf(", %f, %f, %f, ",
avg_runtime, throughput, throughput * sizeof(T) * 2);
fflush(stdout);
if (g_verify) {
// Copy out data
if (util::B40CPerror(cudaMemcpy(
enactor->h_data,
enactor->d_dest, sizeof(T) *
enactor->num_elements, cudaMemcpyDeviceToHost),
"TimedCopy cudaMemcpy d_dest failed: ", __FILE__, __LINE__)) exit(1);
// Verify solution
CompareResults<T>(
enactor->h_data,
enactor->h_reference,
enactor->num_elements, true);
}
printf("\n");
fflush(stdout);
}
};
template <typename Policy>
struct TimedCopy<Policy, 0>
{
template <typename Enactor>
static void Invoke(Enactor *enactor) {}
};
/**
* Callback invoked by parameter-list generation
*/
template <typename ParamList>
void Invoke()
{
const int C_READ_MODIFIER =
util::Access<ParamList, READ_MODIFIER>::VALUE;
const int C_WRITE_MODIFIER =
util::Access<ParamList, WRITE_MODIFIER>::VALUE;
const int C_WORK_STEALING =
util::Access<ParamList, WORK_STEALING>::VALUE;
const int C_OVERSUBSCRIBED_GRID_SIZE =
util::Access<ParamList, OVERSUBSCRIBED_GRID_SIZE>::VALUE;
const int C_LOG_THREADS =
util::Access<ParamList, LOG_THREADS>::VALUE;
const int C_LOG_LOAD_VEC_SIZE =
util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE;
const int C_LOG_LOADS_PER_TILE =
util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE;
const int C_MIN_CTA_OCCUPANCY = 1;
const int C_LOG_SCHEDULE_GRANULARITY =
C_LOG_LOADS_PER_TILE +
C_LOG_LOAD_VEC_SIZE +
C_LOG_THREADS;
// Establish the granularity configuration type
typedef copy::Policy <
T,
SizeT,
TUNE_ARCH,
C_LOG_SCHEDULE_GRANULARITY,
C_MIN_CTA_OCCUPANCY,
C_LOG_THREADS,
C_LOG_LOAD_VEC_SIZE,
C_LOG_LOADS_PER_TILE,
(util::io::ld::CacheModifier) C_READ_MODIFIER,
(util::io::st::CacheModifier) C_WRITE_MODIFIER,
C_WORK_STEALING,
C_OVERSUBSCRIBED_GRID_SIZE> Policy;
const int REG_MULTIPLIER = (sizeof(T) + 3) / 4;
const int TILE_ELEMENTS_PER_THREAD = 1 << (C_LOG_THREADS + C_LOG_LOAD_VEC_SIZE + C_LOG_LOADS_PER_TILE);
const int REGS_ESTIMATE = (REG_MULTIPLIER * TILE_ELEMENTS_PER_THREAD) + 2;
const int EST_REGS_OCCUPANCY = B40C_SM_REGISTERS(TUNE_ARCH) / REGS_ESTIMATE;
// Invoke this config
TimedCopy<Policy, EST_REGS_OCCUPANCY>::Invoke(this);
}
};
/**
* Creates an example scan problem and then dispatches the problem
* to the GPU for the given number of iterations, displaying runtime information.
*/
template<typename T, typename SizeT>
void TestCopy(SizeT num_elements)
{
// Allocate storage and enactor
typedef TuneEnactor<T, SizeT> Detail;
Detail detail(num_elements);
if (util::B40CPerror(cudaMalloc((void**) &detail.d_src, sizeof(T) * num_elements),
"TimedCopy cudaMalloc d_src failed: ", __FILE__, __LINE__)) exit(1);
if (util::B40CPerror(cudaMalloc((void**) &detail.d_dest, sizeof(T) * num_elements),
"TimedCopy cudaMalloc d_dest failed: ", __FILE__, __LINE__)) exit(1);
if ((detail.h_data = (T*) malloc(sizeof(T) * num_elements)) == NULL) {
fprintf(stderr, "Host malloc of problem data failed\n");
exit(1);
}
if ((detail.h_reference = (T*) malloc(sizeof(T) * num_elements)) == NULL) {
fprintf(stderr, "Host malloc of problem data failed\n");
exit(1);
}
for (SizeT i = 0; i < num_elements; ++i) {
// util::RandomBits<T>(detail.h_data[i], 0);
detail.h_data[i] = i;
detail.h_reference[i] = detail.h_data[i];
}
// Move a fresh copy of the problem into device storage
if (util::B40CPerror(cudaMemcpy(detail.d_src, detail.h_data, sizeof(T) * num_elements, cudaMemcpyHostToDevice),
"TimedCopy cudaMemcpy d_src failed: ", __FILE__, __LINE__)) exit(1);
// Run the timing tests
util::ParamListSweep<
Detail,
PARAM_BEGIN + 1,
PARAM_END,
Detail::template Ranges>::template Invoke<util::EmptyTuple>(detail);
// Free allocated memory
if (detail.d_src) cudaFree(detail.d_src);
if (detail.d_dest) cudaFree(detail.d_dest);
// Free our allocated host memory
if (detail.h_data) free(detail.h_data);
if (detail.h_reference) free(detail.h_reference);
}
/******************************************************************************
* Main
******************************************************************************/
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
DeviceInit(args);
//srand(time(NULL));
srand(0); // presently deterministic
int num_elements = 1024;
// Check command line arguments
if (args.CheckCmdLineFlag("help")) {
Usage();
return 0;
}
args.GetCmdLineArgument("i", g_iterations);
args.GetCmdLineArgument("n", num_elements);
args.GetCmdLineArgument("max-ctas", g_max_ctas);
g_verify = args.CheckCmdLineFlag("verify");
g_verbose = args.CheckCmdLineFlag("v");
util::CudaProperties cuda_props;
printf("Test Copy: %d iterations, %lu 32-bit words (%lu bytes)", g_iterations, (unsigned long) num_elements, (unsigned long) num_elements * 4);
printf("\nCodeGen: \t[device_sm_version: %d, kernel_ptx_version: %d]\n\n",
cuda_props.device_sm_version, cuda_props.kernel_ptx_version);
printf("sizeof(T), sizeof(SizeT), CUDA_ARCH, "
"LOG_SCHEDULE_GRANULARITY, MIN_CTA_OCCUPANCY, LOG_THREADS, LOG_LOAD_VEC_SIZE, LOG_LOADS_PER_TILE, "
"READ_MODIFIER, WRITE_MODIFIER, WORK_STEALING, OVERSUBSCRIBED_GRID_SIZE, "
"elapsed time (ms), throughput (10^9 items/s), bandwidth (10^9 B/s), Correctness\n");
// Execute test(s)
#if (TUNE_SIZE == 0) || (TUNE_SIZE == 1)
{
typedef unsigned char T;
TestCopy<T>(num_elements * 4);
}
#endif
#if (TUNE_SIZE == 0) || (TUNE_SIZE == 2)
{
typedef unsigned short T;
TestCopy<T>(num_elements * 2);
}
#endif
#if (TUNE_SIZE == 0) || (TUNE_SIZE == 4)
{
typedef unsigned int T;
TestCopy<T>(num_elements);
}
#endif
#if (TUNE_SIZE == 0) || (TUNE_SIZE == 8)
{
typedef unsigned long long T;
TestCopy<T>(num_elements / 2);
}
#endif
return 0;
}
|
6356fa75fd5463517c9b8ea252f711610d46043e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "defs.h"
#include "common.h"
#define MATRIX_SIZE 500
void serialMM(int sz, double **A, double **B, double **C)
{
int i = 0, j = 0, k = 0;
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz ; k++) {
C[i][j] = C[i][j] + A[i][k] * B[k][j];
}
}
}
return;
}
__global__ void cudakernel(double *buf)
{
// fill in your CUDA kernel code here
}
void cudaMM(int sz, double **A, double **B, double **E)
{
// fill in your CUDA MM code here
// you should store B as efficiently as possible
}
int main(int argc, char *argv[])
{
double **A = Make2DDoubleArray(MATRIX_SIZE, MATRIX_SIZE);
double **B = Make2DDoubleArray(MATRIX_SIZE, MATRIX_SIZE);
double **C = Make2DDoubleArray(MATRIX_SIZE, MATRIX_SIZE);
double **E = Make2DDoubleArray(MATRIX_SIZE, MATRIX_SIZE);
// fill Symmetric matrix A
for (int i = 0; i < MATRIX_SIZE; i++) {
for (int j = 0; j <= i; j ++) {
A[i][j] = (double)min(i+1,j+1);
}
}
// fill Tridiagonal matrix B
for (int i = 0; i < MATRIX_SIZE; i++)
B[i][i] = 1;
for (int i = 0; i < MATRIX_SIZE - 1; i++)
B[i][i+1] = 2;
for (int i = 1; i < MATRIX_SIZE; i++)
B[i][i-1] = 2;
double elapse = 0;
for (int i = 0; i < NUM_RUNS; i++) {
double seconds = read_timer();
serialMM(MATRIX_SIZE, A, B, C);
elapse += (read_timer() - seconds);
}
elapse /= NUM_RUNS;
printf("serial matrix multiplication: %lf\n", elapse);
elapse = 0;
for (int i = 0; i < NUM_RUNS; i++) {
double seconds = read_timer();
cudaMM(MATRIX_SIZE, A, B, E);
elapse += (read_timer() - seconds);
}
elapse /= NUM_RUNS;
printf("GPU matrix multiplication: %lf\n", elapse);
if (matrix_compare(N, C, E) != 0) {
printf("*** error ***\n");
exit(-1);
}
return 0;
}
|
6356fa75fd5463517c9b8ea252f711610d46043e.cu
|
#include <stdio.h>
#include "defs.h"
#include "common.h"
#define MATRIX_SIZE 500
void serialMM(int sz, double **A, double **B, double **C)
{
int i = 0, j = 0, k = 0;
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz ; k++) {
C[i][j] = C[i][j] + A[i][k] * B[k][j];
}
}
}
return;
}
__global__ void cudakernel(double *buf)
{
// fill in your CUDA kernel code here
}
void cudaMM(int sz, double **A, double **B, double **E)
{
// fill in your CUDA MM code here
// you should store B as efficiently as possible
}
int main(int argc, char *argv[])
{
double **A = Make2DDoubleArray(MATRIX_SIZE, MATRIX_SIZE);
double **B = Make2DDoubleArray(MATRIX_SIZE, MATRIX_SIZE);
double **C = Make2DDoubleArray(MATRIX_SIZE, MATRIX_SIZE);
double **E = Make2DDoubleArray(MATRIX_SIZE, MATRIX_SIZE);
// fill Symmetric matrix A
for (int i = 0; i < MATRIX_SIZE; i++) {
for (int j = 0; j <= i; j ++) {
A[i][j] = (double)min(i+1,j+1);
}
}
// fill Tridiagonal matrix B
for (int i = 0; i < MATRIX_SIZE; i++)
B[i][i] = 1;
for (int i = 0; i < MATRIX_SIZE - 1; i++)
B[i][i+1] = 2;
for (int i = 1; i < MATRIX_SIZE; i++)
B[i][i-1] = 2;
double elapse = 0;
for (int i = 0; i < NUM_RUNS; i++) {
double seconds = read_timer();
serialMM(MATRIX_SIZE, A, B, C);
elapse += (read_timer() - seconds);
}
elapse /= NUM_RUNS;
printf("serial matrix multiplication: %lf\n", elapse);
elapse = 0;
for (int i = 0; i < NUM_RUNS; i++) {
double seconds = read_timer();
cudaMM(MATRIX_SIZE, A, B, E);
elapse += (read_timer() - seconds);
}
elapse /= NUM_RUNS;
printf("GPU matrix multiplication: %lf\n", elapse);
if (matrix_compare(N, C, E) != 0) {
printf("*** error ***\n");
exit(-1);
}
return 0;
}
|
3b3ba0b684817f1e8c6dec4e628730fd126e6039.hip
|
// !!! This is a file automatically generated by hipify!!!
//scp -r /home/awd/work/coursework/DS295/project/pp_project/parallel/* [email protected]:/home/dtanwar/Project/Parallel_Programming_Project/parallel
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/time.h>
#include <functional>
#include <iostream>
#include <fstream>
#include <climits>
#include<cuda.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define GS 5
#define BS 5
using namespace std;
typedef int uui;
typedef int var;
typedef struct{
var V; //no of vertices
var E; //no of edges
var n; //no of non empty rows
//var E;
uui *colind; //nonzeroes in each row (colind)
uui *roff; //starting offset of each row (rowoff)
uui *rlen; //length of each row
uui *rows; //indices of the non empty rows
} G;
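/*
 * Illustration only (not in the original source): hypothetical CSR-style
 * contents for a 4-vertex graph with undirected edges (0,1), (0,2), (1,2),
 * (2,3); readGraph below keeps only the u > v direction of each edge.
 */
static const uui example_roff[]   = {0, 0, 1, 3, 4}; // row offsets, length V+1
static const uui example_rlen[]   = {0, 1, 2, 1};    // nonzeros per row
static const uui example_rows[]   = {1, 2, 3};       // non-empty row indices, n = 3
static const uui example_colind[] = {0, 0, 1, 2};    // column indices, length E = 4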
__device__ int L;
__global__ void reset_bitmap(var *bitmap , var blockid, var V){
int index = threadIdx.x + blockDim.x*blockIdx.x;
if(index >= V*blockid && index < V*(blockid+1)){
atomicAnd(bitmap + index , 0);
}
}
__global__ void find(var *data, var value, /*int min_idx,*/ var io_s, var rlen_i){
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if(idx >= io_s && idx<= rlen_i){
if(data[idx] == value)
atomicMin(&L, idx);
}
}
//cudamalloc colind , roff , rows , rlen , bitmap , E , V ,n , supp,k ;
__global__ void getmajorsupport(uui* d_colind, uui* d_roff, uui* d_rows, uui* d_rlen, var* bitmap, var E, var V, var n, uui* d_supp, var K){
__shared__ int broadcast[BS]; //TODO: 2d array! why?
/* if(threadIdx.x==0 && blockIdx.x==1)
// {
// printf("\nkernel threadId.x=%d blockid.x=%d E=%d V=%d n=%d K=%d\n",threadIdx.x,blockIdx.x,E,V,n,K );
// // for (var i=0;i<(n) ;i++)
// // printf("%d ",d_rows[i]);
// // printf("\n");
// // printf("rows\n");
// __syncthreads();
// printf("colind\n");
// for(var i=0;i<E;i++)
// printf("%d ",d_colind[i] );
// printf("\n");
// __syncthreads();
// // printf("roff\n" );
// // for(var i=0;i<V+1;i++)
// // printf("%d ",d_roff[i]);
// // printf("\n");
// // printf("rlen\n");
// // for(var i=0;i<V;i++)
// // printf("%d ",d_rlen[i]);
// // printf("\n");
//
//
//
// } */
var i,io_s,io_e,j,jo_s,jo_e,jo,io,c,count,k;
for (var s = 0 ; s<n ; s+=gridDim.x){
printf("Inside kernel\n");
i = d_rows[s];
io_s = d_roff[i];
io_e = io_s + d_rlen[i];
printf("Inside 4\n");
for (io=io_s ; io < io_e ; io += blockDim.x){
printf("Inside 5, io=%d", io);
c = (io + threadIdx.x < io_e) ? d_colind[io + threadIdx.x] : -1;
printf("Inside 6, c=%d ", c);
if (c > -1){
atomicOr ((bitmap + (V * blockIdx.x) +c) , 1);
broadcast[threadIdx.x] = c;
printf("Inside 1\n");
}
__syncthreads();
for (var t=0 ; t < blockDim.x ; t++){
j = broadcast[t];
printf("Inside 2\n");
if (j == -1) break;
count = 0;
jo_s = d_roff[j];
jo_e = jo_s + d_rlen[j];
for(jo = jo_s + threadIdx.x ; jo < jo_e ; jo += blockDim.x){
k = d_colind[jo];
if(bitmap[V * blockIdx.x + k] == 1){
count ++;
atomicAdd(d_supp + jo , 1);
// find<<< E/1024 +1, 1024 >>>(d_colind, k , /*&L,*/ io_s, d_rlen[i]);
for(L=0; L <= d_rlen[i] ; L++)
if (d_colind[io_s + L] == k)
break;
printf("Before: i=%d, j=%d, k=%d, l=%d\n",i,j,k,L);
atomicAdd(d_supp + io_s + L , 1);
printf("After: i=%d, j=%d, k=%d, l=%d\n",i,j,k,L);
}
}
atomicAdd(d_supp + io + t , count);
}
}
// for(var x = V*blockIdx.x, i=0; i<V/*x< V*(blockIdx.x + 1)*/ ; i++,x++){
// atomicAnd(bitmap + x , 0);
// }
atomicAnd(bitmap + (V * blockIdx.x) + c , 0);
//reset_bitmap<<< GS,BS >>> (bitmap, blockIdx.x,V);
}
__syncthreads();
}
// #include "read_graph.hpp"
ifstream fin;
ofstream fout;
string infile, outfile;
void readGraph(string filename, G *g){
// cout<<"inside readGraph"<<endl;
// infile ="../../../input/" + name + ".mmio" ; // ../../../input/amazon0302_adj.mmio
// outfile="../../output/serial/" + name + ".txt" ; // dataset+"-out.txt";
infile =filename;
cout<<infile<<endl;
fin.open(infile.c_str()); // opening the input file
fout.open(outfile.c_str()); // opening the output file
string temp;
getline(fin,temp); // reading the description line 1
getline(fin,temp); // reading the description line 2
var temp_int; // temporary holder for the (unused) edge weight
var u,v; // the v1,v2 of edges
fin >> g->V>>g->V >> g->E ; // reading the MxN graph and edges
cout<< g->V<<" "<< g->E<<endl; // just checking if it worked
/**************************allocating & initializing all flag[V] to false**********************************/
// bool flag[g->V]; // tells whether particular row is empty or not
// for (var i=0 ; i < g->V ; i++) {
// flag[i] = false; // false means empty
// }
thrust::device_vector<bool> flag(g->V);
thrust::fill(flag.begin(), flag.end(),0);
/**************************allocating & initializing all roff[V+1] to zero**********************************/
g->roff = (uui *) malloc((g->V + 1) * sizeof(uui));
assert(g->roff != NULL);
for (var i=0 ; i < g->V+1 ; i++) {
g->roff[i] = 0;
//cout<<g->roff[i]<<" ";
};
//cout<<endl;
/**************************increase row offset and set flag for non empty row********************************/
for (var i=0; i<g->E; ++i) { //thrust
fin >> u >> v>>temp_int;
//cout<< u <<" "<<v <<endl;
if(u > v)
g->roff[u+1]++ , flag[u] = true;
else if(u < v)
g->roff[v+1]++ , flag[v] = true;
}
/**********************populates indices of nonzero rows in rows[n] and initializes n (no of non-empty rows)******/
g->rows = (uui *) malloc((g->V) * sizeof(uui));
g->n = 0;
var k =0;
for (var i = 0; i<g->V; i++){
if (flag[i] == true){
g->n++; //thrust
g->rows[k++] = i; //thrust
}
}
/**********************************************************************************************************/
//converting the roff from degree holder to actual usage.
uui *temp_num_edges = (uui *) malloc((g->V + 1) * sizeof(uui));
assert(temp_num_edges != NULL);
temp_num_edges[0] = 0;
//g->E= 0;
k=0;
for(var i = 0; i < g->V; i++) {
// g->E += g->roff[i];
k += g->roff[i+1];
temp_num_edges[i+1] =k;
}
for(var i= 0; i < g->V+1; i++)
g->roff[i] = temp_num_edges[i];
/**********************************************************************************************************/
g->rlen = (uui *) malloc((g->V) * sizeof(uui));
k =0;
for (var i = 0; i<g->V; i++){
if (flag[i] == true)
g->rlen[k] = g->roff[i+1] - g->roff[i];
else
g->rlen[k] = 0;
k++;
}
/**********************************************************************************************************/
//Allocate space for colind
g->colind = (uui *) malloc(g->E * sizeof(uui));
assert(g->colind != NULL);
fin.close();
fin.open(infile.c_str());
getline(fin,temp); // reading the description line 1
getline(fin,temp); // reading the description line 2
//Read V and E
//fscanf(infp, "%ld %ld\n", &(g->n), &g->E);
fin>>g->V>>g->V>>g->E;
for(var i = 0; i < g->E; i++)
g->colind[i] = 0;
//Read the edges
// while( fscanf(infp, "%u %u\n", &u, &v) != EOF ) {
for(var i=0 ; i<g->E ; i++){
fin>>u>>v>>temp_int;
if(u>v){
g->colind[ temp_num_edges[u] ] = v;
temp_num_edges[u]++;
}
else if (u<v){
g->colind[ temp_num_edges[v] ] = u;
temp_num_edges[v]++;
}
}
fin.close();
printf("readGraph E=%d V=%d n=%d \n",g->E,g->V,g->n );
cout<<"Read the graph"<<endl;
/**********************************************************************************************************/
}
int main(int argc, char *argv[]){
G g;
// cout<<endl<<"checkpoint 1"<<endl;
char* file_path=argv[1];
readGraph(file_path,&g);
printf("main E=%d V=%d n=%d\n",g.E,g.V,g.n );
// cout<<"checkpoint 2"<<endl;
// cout<<"rows"<<endl;
// for (var i=0;i<(g.n) ;i++){
// cout<<g.rows[i]<<" ";
// }
// cout<<endl;
// cout<<"colind"<<endl;
// for (var i=0;i<(g.E) ;i++){
// cout<<g.colind[i]<<" ";
// }
// cout<<endl;
// cout<<"roff"<<endl;
// for (var i=0;i<(g.V+1) ;i++){
// cout<<g.roff[i]<<" ";
// }
// cout<<endl;
// cout<<"rlen"<<endl;
// for (var i=0;i<(g.V) ;i++){
// cout<<g.rlen[i]<<" ";
// }
// cout<<endl;
// hipMalloc( (void **) &d_rows, size );
// hipMalloc( (void **) &d_colind, size );
// hipMalloc( (void **) &d_roff, size );
// hipMalloc( (void **) &d_rlen, size );g->
//
// for (var i=0;i< g->n ;i++)
// rows[i] =
thrust::device_vector<uui> d_rows ( g.rows , g.rows + g.n);
thrust::device_vector<uui> d_colind (g.colind , g.colind+ g.E);
thrust::device_vector<uui> d_roff (g.roff , g.roff + g.V + 1 );
thrust::device_vector<uui> d_rlen (g.rlen , g.rlen + g.V);
thrust::device_vector<var> bitmap (GS*g.V);
thrust::fill(bitmap.begin(), bitmap.end(),0);
thrust::device_vector<uui> support(g.E);
thrust::fill(support.begin(), support.end(),0);
uui *d_rows1 = thrust::raw_pointer_cast(&d_rows[0]);
uui *d_colind1 = thrust::raw_pointer_cast(&d_colind[0]);
uui *d_roff1 = thrust::raw_pointer_cast(&d_roff[0]);
uui *d_rlen1 = thrust::raw_pointer_cast(&d_rlen[0]);
uui *d_support1 = thrust::raw_pointer_cast(&support[0]);
var *d_bitmap1 = thrust::raw_pointer_cast(&bitmap[0]);
var k=3;
var call=1;
hipEvent_t start, stop;
float elapsedTime = 0.0f;
while(call){
if (k>3)
break;
if(k==3)
{
cout<<"Calling Kernel"<<endl;
printf("E=%d V=%d n=%d K=%d\n",g.E,g.V,g.n,k );
hipEventCreate(&start);
hipEventRecord(start,0);
hipLaunchKernelGGL(( getmajorsupport), dim3(GS),dim3(BS), 0, 0, d_colind1,d_roff1,d_rows1,d_rlen1,d_bitmap1,g.E,g.V,g.n,d_support1,k); // kernel expects (E, V, n), so pass g.E before g.V
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipDeviceSynchronize();
cout<<"Out of kernel"<<endl;
call=0;
}
}
// int i;
// cout << "support[" << 0 << "] = " << support[0] << endl;
// for( i = 0; i < support.size(); i++)
// cout << "support[" << i << "] = " << support[i] << endl;
// return 0;
hipEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
}
|
3b3ba0b684817f1e8c6dec4e628730fd126e6039.cu
|
//scp -r /home/awd/work/coursework/DS295/project/pp_project/parallel/* [email protected]:/home/dtanwar/Project/Parallel_Programming_Project/parallel
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/time.h>
#include <functional>
#include <iostream>
#include <fstream>
#include <climits>
#include<cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define GS 5
#define BS 5
using namespace std;
typedef int uui;
typedef int var;
typedef struct{
var V; //no of vertices
var E; //no of edges
var n; //no of non empty rows
//var E;
uui *colind; //nonzeroes in each row (colind)
uui *roff; //starting offset of each row (rowoff)
uui *rlen; //length of each row
uui *rows; //indices of the non empty rows
} G;
__device__ int L;
__global__ void reset_bitmap(var *bitmap , var blockid, var V){
int index = threadIdx.x + blockDim.x*blockIdx.x;
if(index >= V*blockid && index < V*(blockid+1)){
atomicAnd(bitmap + index , 0);
}
}
__global__ void find(var *data, var value, /*int min_idx,*/ var io_s, var rlen_i){
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if(idx >= io_s && idx<= rlen_i){
if(data[idx] == value)
atomicMin(&L, idx);
}
}
//cudamalloc colind , roff , rows , rlen , bitmap , E , V ,n , supp,k ;
__global__ void getmajorsupport(uui* d_colind, uui* d_roff, uui* d_rows, uui* d_rlen, var* bitmap, var E, var V, var n, uui* d_supp, var K){
__shared__ int broadcast[BS]; //TODO: 2d array! why?
/* if(threadIdx.x==0 && blockIdx.x==1)
// {
// printf("\nkernel threadId.x=%d blockid.x=%d E=%d V=%d n=%d K=%d\n",threadIdx.x,blockIdx.x,E,V,n,K );
// // for (var i=0;i<(n) ;i++)
// // printf("%d ",d_rows[i]);
// // printf("\n");
// // printf("rows\n");
// __syncthreads();
// printf("colind\n");
// for(var i=0;i<E;i++)
// printf("%d ",d_colind[i] );
// printf("\n");
// __syncthreads();
// // printf("roff\n" );
// // for(var i=0;i<V+1;i++)
// // printf("%d ",d_roff[i]);
// // printf("\n");
// // printf("rlen\n");
// // for(var i=0;i<V;i++)
// // printf("%d ",d_rlen[i]);
// // printf("\n");
//
//
//
// } */
var i,io_s,io_e,j,jo_s,jo_e,jo,io,c,count,k;
for (var s = 0 ; s<n ; s+=gridDim.x){
printf("Inside kernel\n");
i = d_rows[s];
io_s = d_roff[i];
io_e = io_s + d_rlen[i];
printf("Inside 4\n");
for (io=io_s ; io < io_e ; io += blockDim.x){
printf("Inside 5, io=%d", io);
c = (io + threadIdx.x < io_e) ? d_colind[io + threadIdx.x] : -1;
printf("Inside 6, c=%d ", c);
if (c > -1){
atomicOr ((bitmap + (V * blockIdx.x) +c) , 1);
broadcast[threadIdx.x] = c;
printf("Inside 1\n");
}
__syncthreads();
for (var t=0 ; t < blockDim.x ; t++){
j = broadcast[t];
printf("Inside 2\n");
if (j == -1) break;
count = 0;
jo_s = d_roff[j];
jo_e = jo_s + d_rlen[j];
for(jo = jo_s + threadIdx.x ; jo < jo_e ; jo += blockDim.x){
k = d_colind[jo];
if(bitmap[V * blockIdx.x + k] == 1){
count ++;
atomicAdd(d_supp + jo , 1);
// find<<< E/1024 +1, 1024 >>>(d_colind, k , /*&L,*/ io_s, d_rlen[i]);
for(L=0; L <= d_rlen[i] ; L++)
if (d_colind[io_s + L] == k)
break;
printf("Before: i=%d, j=%d, k=%d, l=%d\n",i,j,k,L);
atomicAdd(d_supp + io_s + L , 1);
printf("After: i=%d, j=%d, k=%d, l=%d\n",i,j,k,L);
}
}
atomicAdd(d_supp + io + t , count);
}
}
// for(var x = V*blockIdx.x, i=0; i<V/*x< V*(blockIdx.x + 1)*/ ; i++,x++){
// atomicAnd(bitmap + x , 0);
// }
atomicAnd(bitmap + (V * blockIdx.x) + c , 0);
//reset_bitmap<<< GS,BS >>> (bitmap, blockIdx.x,V);
}
__syncthreads();
}
// #include "read_graph.hpp"
ifstream fin;
ofstream fout;
string infile, outfile;
void readGraph(string filename, G *g){
// cout<<"inside readGraph"<<endl;
// infile ="../../../input/" + name + ".mmio" ; // ../../../input/amazon0302_adj.mmio
// outfile="../../output/serial/" + name + ".txt" ; // dataset+"-out.txt";
infile =filename;
cout<<infile<<endl;
fin.open(infile.c_str()); // opening the input file
fout.open(outfile.c_str()); // opening the output file
string temp;
getline(fin,temp); // reading the description line 1
getline(fin,temp); // reading the description line 2
var temp_int; // temporary holder for the (unused) edge weight
var u,v; // the v1,v2 of edges
fin >> g->V>>g->V >> g->E ; // reading the MxN graph and edges
cout<< g->V<<" "<< g->E<<endl; // just checking if it worked
/**************************allocating & initializing all flag[V] to false**********************************/
// bool flag[g->V]; // tells whether particular row is empty or not
// for (var i=0 ; i < g->V ; i++) {
// flag[i] = false; // false means empty
// }
thrust::device_vector<bool> flag(g->V);
thrust::fill(flag.begin(), flag.end(),0);
/**************************allocating & initializing all roff[V+1] to zero**********************************/
g->roff = (uui *) malloc((g->V + 1) * sizeof(uui));
assert(g->roff != NULL);
for (var i=0 ; i < g->V+1 ; i++) {
g->roff[i] = 0;
//cout<<g->roff[i]<<" ";
};
//cout<<endl;
/**************************increase row offset and set flag for non empty row********************************/
for (var i=0; i<g->E; ++i) { //thrust
fin >> u >> v>>temp_int;
//cout<< u <<" "<<v <<endl;
if(u > v)
g->roff[u+1]++ , flag[u] = true;
else if(u < v)
g->roff[v+1]++ , flag[v] = true;
}
/**********************populates indices of nonzero rows in rows[n] and initializes n (no of non-empty rows)******/
g->rows = (uui *) malloc((g->V) * sizeof(uui));
g->n = 0;
var k =0;
for (var i = 0; i<g->V; i++){
if (flag[i] == true){
g->n++; //thrust
g->rows[k++] = i; //thrust
}
}
/**********************************************************************************************************/
//converting the roff from degree holder to actual usage.
uui *temp_num_edges = (uui *) malloc((g->V + 1) * sizeof(uui));
assert(temp_num_edges != NULL);
temp_num_edges[0] = 0;
//g->E= 0;
k=0;
for(var i = 0; i < g->V; i++) {
// g->E += g->roff[i];
k += g->roff[i+1];
temp_num_edges[i+1] =k;
}
for(var i= 0; i < g->V+1; i++)
g->roff[i] = temp_num_edges[i];
/**********************************************************************************************************/
g->rlen = (uui *) malloc((g->V) * sizeof(uui));
k =0;
for (var i = 0; i<g->V; i++){
if (flag[i] == true)
g->rlen[k] = g->roff[i+1] - g->roff[i];
else
g->rlen[k] = 0;
k++;
}
/**********************************************************************************************************/
//Allocate space for colind
g->colind = (uui *) malloc(g->E * sizeof(uui));
assert(g->colind != NULL);
fin.close();
fin.open(infile.c_str());
getline(fin,temp); // reading the description line 1
getline(fin,temp); // reading the description line 2
//Read V and E
//fscanf(infp, "%ld %ld\n", &(g->n), &g->E);
fin>>g->V>>g->V>>g->E;
for(var i = 0; i < g->E; i++)
g->colind[i] = 0;
//Read the edges
// while( fscanf(infp, "%u %u\n", &u, &v) != EOF ) {
for(var i=0 ; i<g->E ; i++){
fin>>u>>v>>temp_int;
if(u>v){
g->colind[ temp_num_edges[u] ] = v;
temp_num_edges[u]++;
}
else if (u<v){
g->colind[ temp_num_edges[v] ] = u;
temp_num_edges[v]++;
}
}
fin.close();
printf("readGraph E=%d V=%d n=%d \n",g->E,g->V,g->n );
cout<<"Read the graph"<<endl;
/**********************************************************************************************************/
}
int main(int argc, char *argv[]){
G g;
// cout<<endl<<"checkpoint 1"<<endl;
char* file_path=argv[1];
readGraph(file_path,&g);
printf("main E=%d V=%d n=%d\n",g.E,g.V,g.n );
// cout<<"checkpoint 2"<<endl;
// cout<<"rows"<<endl;
// for (var i=0;i<(g.n) ;i++){
// cout<<g.rows[i]<<" ";
// }
// cout<<endl;
// cout<<"colind"<<endl;
// for (var i=0;i<(g.E) ;i++){
// cout<<g.colind[i]<<" ";
// }
// cout<<endl;
// cout<<"roff"<<endl;
// for (var i=0;i<(g.V+1) ;i++){
// cout<<g.roff[i]<<" ";
// }
// cout<<endl;
// cout<<"rlen"<<endl;
// for (var i=0;i<(g.V) ;i++){
// cout<<g.rlen[i]<<" ";
// }
// cout<<endl;
// cudaMalloc( (void **) &d_rows, size );
// cudaMalloc( (void **) &d_colind, size );
// cudaMalloc( (void **) &d_roff, size );
// cudaMalloc( (void **) &d_rlen, size );g->
//
// for (var i=0;i< g->n ;i++)
// rows[i] =
thrust::device_vector<uui> d_rows ( g.rows , g.rows + g.n);
thrust::device_vector<uui> d_colind (g.colind , g.colind+ g.E);
thrust::device_vector<uui> d_roff (g.roff , g.roff + g.V + 1 );
thrust::device_vector<uui> d_rlen (g.rlen , g.rlen + g.V);
thrust::device_vector<var> bitmap (GS*g.V);
thrust::fill(bitmap.begin(), bitmap.end(),0);
thrust::device_vector<uui> support(g.E);
thrust::fill(support.begin(), support.end(),0);
uui *d_rows1 = thrust::raw_pointer_cast(&d_rows[0]);
uui *d_colind1 = thrust::raw_pointer_cast(&d_colind[0]);
uui *d_roff1 = thrust::raw_pointer_cast(&d_roff[0]);
uui *d_rlen1 = thrust::raw_pointer_cast(&d_rlen[0]);
uui *d_support1 = thrust::raw_pointer_cast(&support[0]);
var *d_bitmap1 = thrust::raw_pointer_cast(&bitmap[0]);
var k=3;
var call=1;
cudaEvent_t start, stop;
float elapsedTime = 0.0f;
while(call){
if (k>3)
break;
if(k==3)
{
cout<<"Calling Kernel"<<endl;
printf("E=%d V=%d n=%d K=%d\n",g.E,g.V,g.n,k );
cudaEventCreate(&start);
cudaEventRecord(start,0);
getmajorsupport<<<GS,BS>>>(d_colind1,d_roff1,d_rows1,d_rlen1,d_bitmap1,g.E,g.V,g.n,d_support1,k); // kernel expects (E, V, n), so pass g.E before g.V
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaDeviceSynchronize();
cout<<"Out of kernel"<<endl;
call=0;
}
}
// int i;
// cout << "support[" << 0 << "] = " << support[0] << endl;
// for( i = 0; i < support.size(); i++)
// cout << "support[" << i << "] = " << support[i] << endl;
// return 0;
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time : %f ms\n" ,elapsedTime);
}
|
f238b615e7a77bd22cb9da42058971d573843901.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2014 Luke Marcus Biagio Testa
All rights reserved.
Redistribution and use in source and binary forms are permitted
provided that the above copyright notice and this paragraph are
duplicated in all such forms and that any documentation,
advertising materials, and other materials related to such
distribution and use acknowledge that the software was developed
by the Luke Marcus Biagio Testa. The name of the
Luke Marcus Biagio Testa may not be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include "FileHandler.h"
#include "FeatureSpace.h"
#include <sstream>
/*
* Running Information:
*
* x3 Command Line Arguments: [Dataset(.txt), output path, No Clusters]
*
* Dataset.txt is stored in assignment 1 repository root
*
*/
// Command line inputs: [Program InputFile OutputFilePath NoClusters]
int main(int argc, char *argv[])
{
// Check the 3 required command line inputs (plus the program name)
if ( argc != 4 )
{
std::cout << "Needs 4 Arguments: [Program Dataset K OutputPath]" << std::endl;
std::cout << "Using " << argc << " Arguments" << std::endl;
for (int i = 0; i < argc; i++)
std::cout << argv[i] << std::endl;
return 0;
}
// Ensure file exists and open file as read only. Initialize FeatureSpace with N clusters
FileHandler File( argv[1], true );
FeatureSpace Dictionary( atoi(argv[3]) );
// Store data points in feature space
while ( File.filePtr().peek() != EOF )
{
File.filePtr() >> Dictionary;
}
// Start Timer. Find Clusters.
double executionTime = jbutil::gettime();
int iterations = Dictionary.ClusterSearch();
executionTime = jbutil::gettime() - executionTime;
hipDeviceReset();
std::cout << "v11 Execution Time: " << executionTime << " s" << std::endl;
std::cout << "v11 Iterations: " << iterations << std::endl;
// End of Timer. Found Clusters. Get Cluster-Allocated Data Points.
std::vector<FeaturePoint> data = Dictionary.getDataPoints();
FeaturePoint* centers = Dictionary.getCentroids();
std::cout << "------------------- Cluster Information -----------------" << std::endl;
for(int i = 0; i < atoi(argv[3]) ; i++) // one line per cluster centroid
std::cout << "Centroid[" << i << "]: (" << centers[i].contents().first << "," << centers[i].contents().second << ")" << std::endl;
// -------------------- Output information to file ---------------------------------
/*
// Timing information
std::string log_speed(argv[2]);
log_speed.append("Timing_Information");
FileHandler speedFile(log_speed, false);
speedFile.filePtr() << executionTime;
// Iterations
std::string log_iteration(argv[2]);
log_iteration.append("Iteration_Information");
FileHandler iterationFile(log_iteration, false);
iterationFile.filePtr() << iterations;
std::cout << "Execution Time: " << executionTime << " s" << std::endl;
std::cout << "Iterations: " << iterations << std::endl;
// Output data to file. For each cluster [ip 3 on command line], for each point in cluster[i]
for(int i = 0; i < atoi(argv[3]); i++)
{
// Format output file name
std::string temp(argv[2]);
temp.append("Kmeans_Cluster_");
std::stringstream val;
val << i << "_Data";
temp.append( val.str() );
// Open Output File
FileHandler outputFile( temp, false );
//Write Cluster's Centroid to File
outputFile.filePtr() << (Point&)centers[i];
// Write cluster's data points to file
for(int j = 0; j < data.size(); j++)
{
// Output data point to file if contained within current cluster
if ( data[j].Cluster() == i )
outputFile.filePtr() << (Point&)data[j];
}
}
*/
return 0;
}
|
f238b615e7a77bd22cb9da42058971d573843901.cu
|
/*
Copyright (c) 2014 Luke Marcus Biagio Testa
All rights reserved.
Redistribution and use in source and binary forms are permitted
provided that the above copyright notice and this paragraph are
duplicated in all such forms and that any documentation,
advertising materials, and other materials related to such
distribution and use acknowledge that the software was developed
by the Luke Marcus Biagio Testa. The name of the
Luke Marcus Biagio Testa may not be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include "FileHandler.h"
#include "FeatureSpace.h"
#include <sstream>
/*
* Running Information:
*
* x3 Command Line Arguments: [Dataset(.txt), output path, No Clusters]
*
* Dataset.txt is stored in assignment 1 repository root
*
*/
// Command line inputs: [Program InputFile OutputFilePath NoClusters]
int main(int argc, char *argv[])
{
// Check the 3 required command line inputs (plus the program name)
if ( argc != 4 )
{
std::cout << "Needs 4 Arguments: [Program Dataset K OutputPath]" << std::endl;
std::cout << "Using " << argc << " Arguments" << std::endl;
for (int i = 0; i < argc; i++)
std::cout << argv[i] << std::endl;
return 0;
}
// Ensure file exists and open file as read only. Initialize FeatureSpace with N clusters
FileHandler File( argv[1], true );
FeatureSpace Dictionary( atoi(argv[3]) );
// Store data points in feature space
while ( File.filePtr().peek() != EOF )
{
File.filePtr() >> Dictionary;
}
// Start Timer. Find Clusters.
double executionTime = jbutil::gettime();
int iterations = Dictionary.ClusterSearch();
executionTime = jbutil::gettime() - executionTime;
cudaDeviceReset();
std::cout << "v11 Execution Time: " << executionTime << " s" << std::endl;
std::cout << "v11 Iterations: " << iterations << std::endl;
// End of Timer. Found Clusters. Get Cluster-Allocated Data Points.
std::vector<FeaturePoint> data = Dictionary.getDataPoints();
FeaturePoint* centers = Dictionary.getCentroids();
std::cout << "------------------- Cluster Information -----------------" << std::endl;
for(int i = 0; i < atoi(argv[3]) ; i++) // one line per cluster centroid
std::cout << "Centroid[" << i << "]: (" << centers[i].contents().first << "," << centers[i].contents().second << ")" << std::endl;
// -------------------- Output information to file ---------------------------------
/*
// Timing information
std::string log_speed(argv[2]);
log_speed.append("Timing_Information");
FileHandler speedFile(log_speed, false);
speedFile.filePtr() << executionTime;
// Iterations
std::string log_iteration(argv[2]);
log_iteration.append("Iteration_Information");
FileHandler iterationFile(log_iteration, false);
iterationFile.filePtr() << iterations;
std::cout << "Execution Time: " << executionTime << " s" << std::endl;
std::cout << "Iterations: " << iterations << std::endl;
// Output data to file. For each cluster [ip 3 on command line], for each point in cluster[i]
for(int i = 0; i < atoi(argv[3]); i++)
{
// Format output file name
std::string temp(argv[2]);
temp.append("Kmeans_Cluster_");
std::stringstream val;
val << i << "_Data";
temp.append( val.str() );
// Open Output File
FileHandler outputFile( temp, false );
//Write Cluster's Centroid to File
outputFile.filePtr() << (Point&)centers[i];
// Write cluster's data points to file
for(int j = 0; j < data.size(); j++)
{
// Output data point to file if contained within current cluster
if ( data[j].Cluster() == i )
outputFile.filePtr() << (Point&)data[j];
}
}
*/
return 0;
}
|
a7b893c42e085bd347e86bcb868cf7a720818605.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <sys/stat.h>
#include <getopt.h>
#include <stdint.h>
#include <vector>
#include <sys/types.h>
#include <math.h>
#include "hash.h"
#include "checksum.h"
void recalcu(int chunk_size, int chunk_num, int *stat, int jump_pos, int file_len, int total_threads, char *h_file, int *match_offset, int *match_chunkid, Node *ht, int j, int recalcu_region_size);
Node *lookup_ht(Node *ht, int32 rc, int *chunk_id);
__device__ uint32 d_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2);
__device__ void d_get_checksum2(const uint8_t *in, const size_t inlen, uint8_t *out);
__device__ uint d_hash(uint32 rc);
__device__ Node *d_lookup_ht(Node *ht, int32 rc, int *chunk_id);
__device__ bool d_char_compare(uint8_t *c1, uint8_t *c2);
__constant__ uint8_t k[16]={0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
inline __device__ bool d_char_compare(uint8_t *c1, uint8_t *c2){
if(c1[0]!=c2[0] || c1[1]!=c2[1] || c1[2]!=c2[2] || c1[3]!=c2[3]) return false;
else if(c1[4]!=c2[4] || c1[5]!=c2[5] || c1[6]!=c2[6] || c1[7]!=c2[7]) return false;
else return true;
}
inline bool char_compare(uint8_t *c1, uint8_t *c2){
if(c1[0]!=c2[0] || c1[1]!=c2[1] || c1[2]!=c2[2] || c1[3]!=c2[3]) return false;
else if(c1[4]!=c2[4] || c1[5]!=c2[5] || c1[6]!=c2[6] || c1[7]!=c2[7]) return false;
else return true;
}
__global__ void multiwarp_match(Node *ht, char *file, size_t file_len, int total_threads, int chunk_size, int chunk_num,
int *match_offset, int *match_chunkid, int *stat)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int fileBeginPos = chunk_num*chunk_size*thread_id;
int chunkBeginPos = chunk_num*thread_id;
if(fileBeginPos < file_len){
int recalcu = 1;
uint32 rc;
int chunk_id;
int match_num = 0;
int i = 0;
uint32 s1 = 0, s2 = 0;
//the char in the head of a chunk, it can be used to store as the unmatch value and use to recalcu
char chunk_head_value;
int length = chunk_size;
length = chunk_size;
for(; i < chunk_size*chunk_num;){
//the remaining bytes are less than one full chunk_size
if(fileBeginPos+i>file_len-chunk_size){
length = file_len-fileBeginPos-i;
}
if(recalcu == 1) rc = d_get_checksum1(&file[fileBeginPos + i], length, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += file[fileBeginPos+i+length-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
chunk_head_value = file[fileBeginPos+i];
Node *np = d_lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
i += 1;
}
else{
uint8_t sum2[8];
d_get_checksum2((uint8_t*)&file[fileBeginPos+i], (size_t)length, (uint8_t*)sum2);
while(1){
if(d_char_compare(sum2,np->md5)){
match_chunkid[chunkBeginPos + match_num] = np->chunk_id;
match_offset[chunkBeginPos + match_num] = fileBeginPos + i;
match_num ++;
recalcu = 1;
//printf("we have match in thread %d in gpu\n",thread_id);
i += chunk_size;
break;
}
else{
np = np->next;
if(np == NULL){
recalcu = 0;
i += 1;
break;
}
}
}
}
}
//record match_num
stat[thread_id] = match_num;
}
}
__global__ void gpu_recalcu(Node *ht, char *file, int chunk_size, int chunk_num, int *match_offset, int *match_chunkid, int *stat, int region_size)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int start_t = thread_id * region_size;
//printf("thread %d start recalcu on %d thread, region size %d\n", thread_id, start_t, region_size);
for(int i=start_t; i<start_t+region_size-1; ++i){
//printf("thread %d recalcu on its %d thread\n", thread_id, i-start_t);
int t_match_num = stat[i];
int j = i+1;
int jump_pos = match_offset[chunk_num*i+t_match_num-1]+chunk_size;
if(t_match_num > 0 && stat[j] > 0 && jump_pos > match_offset[chunk_num*j]){
//printf("in gpu thread %d need recalcu\n", start_t);
int match_index = 0;
int recalcu = 1;
int chunk_id;
int j_match_num = stat[j];
int j_match_begin = chunk_num*j;
char chunk_head_value;
uint32 s1, s2, rc;
while(1){
if(recalcu == 1) rc = d_get_checksum1(&file[jump_pos], chunk_size, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += file[jump_pos+chunk_size-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
while(jump_pos > match_offset[j_match_begin+match_index]){
if(match_index < j_match_num){
match_chunkid[j_match_begin+match_index] = -1;
stat[j]--;
match_index++;
}
else break;
}
if(jump_pos == match_offset[j_match_begin+match_index]) break;
Node *np = d_lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
jump_pos += 1;
}
else{
uint8_t sum2[8];
d_get_checksum2((uint8_t*)&file[jump_pos], (size_t)chunk_size, (uint8_t*)sum2);
while(1){
if(d_char_compare(sum2,np->md5)){
for(int k=j_match_begin;k<j_match_begin+chunk_num;++k){
if(match_chunkid[k]==-1 || jump_pos+chunk_size > match_offset[k]){
match_offset[k] = jump_pos;
match_chunkid[k] = chunk_id;
stat[j]++;
break;
}
}
jump_pos += chunk_size;
recalcu = 1;
break;
}
else{
np = np->next;
if(np == NULL){
chunk_head_value = file[jump_pos];
jump_pos += 1;
recalcu = 0;
break;
}
}
}
}
if(match_index >= j_match_num) break;
}
}
}
}
void recalcu(int chunk_size, int chunk_num, int *stat, int jump_pos, int file_len, int total_threads,
char *h_file, int *match_offset, int *match_chunkid, Node *ht, int j, int recalcu_region_size){
int match_index = 0;
int unmatch_index = 0; //
int recalcu = 1;
int chunk_id;
int length = chunk_size;
int j_match_num = 0;
for(int i=0;i<recalcu_region_size;++i){
j_match_num += stat[j+i];
}
int j_match_begin = chunk_num*j;
char chunk_head_value;
uint32 s1, s2, rc;
while(1){
if(recalcu == 1) rc = get_checksum1(&h_file[jump_pos], length, (int*)&s1, (int*)&s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += h_file[jump_pos+length-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
while(jump_pos > match_offset[j_match_begin+match_index+unmatch_index]){
if(match_chunkid[j_match_begin+match_index+unmatch_index] == -1){
unmatch_index += 1;
}
else if(match_index < j_match_num){
match_chunkid[j_match_begin+match_index+unmatch_index] = -1;
//stat[j]--;
match_index++;
}
else break;
}
if(jump_pos == match_offset[j_match_begin+match_index+unmatch_index] && match_chunkid[j_match_begin+match_index+unmatch_index] != -1) break;
Node *np = lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
jump_pos += 1;
}
else{
uint8_t sum2[8];
get_checksum2((uint8_t*)&h_file[jump_pos], (size_t)length, (uint8_t*)sum2);
while(1){
//if(memcmp(sum2,np->md5,8)==0){
if(char_compare(sum2,np->md5)){
for(int k=j_match_begin;k<j_match_begin+chunk_num*recalcu_region_size;++k){
//the slot is already -1, or not yet -1 but about to be overwritten
if(match_chunkid[k]==-1 || jump_pos+chunk_size > match_offset[k]){
match_offset[k] = jump_pos;
match_chunkid[k] = chunk_id;
//stat[j]++;
break;
}
else{
printf("error in 324 in new_file.cu\n");
}
}
jump_pos += chunk_size;
recalcu = 1;
break;
}
else{
np = np->next;
if(np == NULL){
chunk_head_value = h_file[jump_pos];
jump_pos += 1;
recalcu = 0;
break;
}
}
}
}
//it is also possible that the whole chunk_size*chunk_num region has no match at all
if(match_index >= j_match_num) break;
//printf("match_index is %d, j_match_num is %d\n",match_index, j_match_num);
}
}
Node *lookup_ht(Node *ht, int32 rc, int *chunk_id){
uint index = hash(rc);
Node n = ht[index];
if(n.chunk_id == -1){
return NULL;
}
else{
Node *np = &n;
for(; np != NULL; np=np->next){
if(rc == np->checksum){
*chunk_id = np->chunk_id;
return np;
}
}
return NULL;
}
}
__device__ void d_get_checksum2(const uint8_t *in, const size_t inlen, uint8_t *out){
uint64_t v0 = 0x736f6d6570736575ULL;
uint64_t v1 = 0x646f72616e646f6dULL;
uint64_t v2 = 0x6c7967656e657261ULL;
uint64_t v3 = 0x7465646279746573ULL;
//uint64_t k0 = 50462976;
//uint64_t k1 = 185207048;
uint64_t k0 = U8TO64_LE(k);
uint64_t k1 = U8TO64_LE(k + 8);
uint64_t m;
const uint8_t *end = in + inlen - (inlen % sizeof(uint64_t));
const int left = inlen & 7;
uint64_t b = ((uint64_t)inlen) << 56;
v3 ^= k1;
v2 ^= k0;
v1 ^= k1;
v0 ^= k0;
for (; in != end; in += 8) {
m = U8TO64_LE(in);
v3 ^= m;
SIPROUND;
SIPROUND;
v0 ^= m;
}
switch (left) {
case 7:
b |= ((uint64_t)in[6]) << 48;
case 6:
b |= ((uint64_t)in[5]) << 40;
case 5:
b |= ((uint64_t)in[4]) << 32;
case 4:
b |= ((uint64_t)in[3]) << 24;
case 3:
b |= ((uint64_t)in[2]) << 16;
case 2:
b |= ((uint64_t)in[1]) << 8;
case 1:
b |= ((uint64_t)in[0]);
break;
case 0:
break;
}
v3 ^= b;
SIPROUND;
SIPROUND;
v0 ^= b;
v2 ^= 0xff;
SIPROUND;
SIPROUND;
SIPROUND;
SIPROUND;
b = v0 ^ v1 ^ v2 ^ v3;
U64TO8_LE(out, b);
}
__device__ uint32 d_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2)
{
int32 i;
uint32 s1, s2;
char *buf = (char *)buf1;
s1 = s2 = 0;
for (i = 0; i < (len-4); i+=4) {
s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] +
10*CHAR_OFFSET;
s1 += (buf[i+0] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
}
for (; i < len; i++) {
s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
}
*d_s1 = s1;
*d_s2 = s2;
return (s1 & 0xffff) + (s2 << 16);
}
__device__ uint d_hash(uint32 rc){
uint p = 1867;
return (((rc>>16)& 0xffff ^ ((rc&0xffff) * p)) & 0xffff)%HASHSIZE;
}
__device__ Node* d_lookup_ht(Node *ht, int32 rc, int *chunk_id){
uint index = d_hash(rc);
Node n = ht[index];
if(n.chunk_id == -1){
return NULL;
}
else{
Node *np = &n;
for(; np != NULL; np=np->next){
if(rc == np->checksum){
*chunk_id = np->chunk_id;
return np;
}
}
return NULL;
}
}
|
a7b893c42e085bd347e86bcb868cf7a720818605.cu
|
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <sys/stat.h>
#include <getopt.h>
#include <stdint.h>
#include <vector>
#include <sys/types.h>
#include <math.h>
#include "hash.h"
#include "checksum.h"
void recalcu(int chunk_size, int chunk_num, int *stat, int jump_pos, int file_len, int total_threads, char *h_file, int *match_offset, int *match_chunkid, Node *ht, int j, int recalcu_region_size);
Node *lookup_ht(Node *ht, int32 rc, int *chunk_id);
__device__ uint32 d_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2);
__device__ void d_get_checksum2(const uint8_t *in, const size_t inlen, uint8_t *out);
__device__ uint d_hash(uint32 rc);
__device__ Node *d_lookup_ht(Node *ht, int32 rc, int *chunk_id);
__device__ bool d_char_compare(uint8_t *c1, uint8_t *c2);
__constant__ uint8_t k[16]={0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
inline __device__ bool d_char_compare(uint8_t *c1, uint8_t *c2){
if(c1[0]!=c2[0] || c1[1]!=c2[1] || c1[2]!=c2[2] || c1[3]!=c2[3]) return false;
else if(c1[4]!=c2[4] || c1[5]!=c2[5] || c1[6]!=c2[6] || c1[7]!=c2[7]) return false;
else return true;
}
inline bool char_compare(uint8_t *c1, uint8_t *c2){
if(c1[0]!=c2[0] || c1[1]!=c2[1] || c1[2]!=c2[2] || c1[3]!=c2[3]) return false;
else if(c1[4]!=c2[4] || c1[5]!=c2[5] || c1[6]!=c2[6] || c1[7]!=c2[7]) return false;
else return true;
}
__global__ void multiwarp_match(Node *ht, char *file, size_t file_len, int total_threads, int chunk_size, int chunk_num,
int *match_offset, int *match_chunkid, int *stat)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int fileBeginPos = chunk_num*chunk_size*thread_id;
int chunkBeginPos = chunk_num*thread_id;
if(fileBeginPos < file_len){
int recalcu = 1;
uint32 rc;
int chunk_id;
int match_num = 0;
int i = 0;
uint32 s1 = 0, s2 = 0;
//the char in the head of a chunk, it can be used to store as the unmatch value and use to recalcu
char chunk_head_value;
int length = chunk_size;
length = chunk_size;
for(; i < chunk_size*chunk_num;){
//the remaining bytes are less than one full chunk_size
if(fileBeginPos+i>file_len-chunk_size){
length = file_len-fileBeginPos-i;
}
if(recalcu == 1) rc = d_get_checksum1(&file[fileBeginPos + i], length, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += file[fileBeginPos+i+length-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
chunk_head_value = file[fileBeginPos+i];
Node *np = d_lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
i += 1;
}
else{
uint8_t sum2[8];
d_get_checksum2((uint8_t*)&file[fileBeginPos+i], (size_t)length, (uint8_t*)sum2);
while(1){
if(d_char_compare(sum2,np->md5)){
match_chunkid[chunkBeginPos + match_num] = np->chunk_id;
match_offset[chunkBeginPos + match_num] = fileBeginPos + i;
match_num ++;
recalcu = 1;
//printf("we have match in thread %d in gpu\n",thread_id);
i += chunk_size;
break;
}
else{
np = np->next;
if(np == NULL){
recalcu = 0;
i += 1;
break;
}
}
}
}
}
//record match_num
stat[thread_id] = match_num;
}
}
__global__ void gpu_recalcu(Node *ht, char *file, int chunk_size, int chunk_num, int *match_offset, int *match_chunkid, int *stat, int region_size)
{
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int start_t = thread_id * region_size;
//printf("thread %d start recalcu on %d thread, region size %d\n", thread_id, start_t, region_size);
for(int i=start_t; i<start_t+region_size-1; ++i){
//printf("thread %d recalcu on its %d thread\n", thread_id, i-start_t);
int t_match_num = stat[i];
int j = i+1;
int jump_pos = match_offset[chunk_num*i+t_match_num-1]+chunk_size;
if(t_match_num > 0 && stat[j] > 0 && jump_pos > match_offset[chunk_num*j]){
//printf("in gpu thread %d need recalcu\n", start_t);
int match_index = 0;
int recalcu = 1;
int chunk_id;
int j_match_num = stat[j];
int j_match_begin = chunk_num*j;
char chunk_head_value;
uint32 s1, s2, rc;
while(1){
if(recalcu == 1) rc = d_get_checksum1(&file[jump_pos], chunk_size, &s1, &s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += file[jump_pos+chunk_size-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
while(jump_pos > match_offset[j_match_begin+match_index]){
if(match_index < j_match_num){
match_chunkid[j_match_begin+match_index] = -1;
stat[j]--;
match_index++;
}
else break;
}
if(jump_pos == match_offset[j_match_begin+match_index]) break;
Node *np = d_lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
jump_pos += 1;
}
else{
uint8_t sum2[8];
d_get_checksum2((uint8_t*)&file[jump_pos], (size_t)chunk_size, (uint8_t*)sum2);
while(1){
if(d_char_compare(sum2,np->md5)){
for(int k=j_match_begin;k<j_match_begin+chunk_num;++k){
if(match_chunkid[k]==-1 || jump_pos+chunk_size > match_offset[k]){
match_offset[k] = jump_pos;
match_chunkid[k] = chunk_id;
stat[j]++;
break;
}
}
jump_pos += chunk_size;
recalcu = 1;
break;
}
else{
np = np->next;
if(np == NULL){
chunk_head_value = file[jump_pos];
jump_pos += 1;
recalcu = 0;
break;
}
}
}
}
if(match_index >= j_match_num) break;
}
}
}
}
void recalcu(int chunk_size, int chunk_num, int *stat, int jump_pos, int file_len, int total_threads,
char *h_file, int *match_offset, int *match_chunkid, Node *ht, int j, int recalcu_region_size){
int match_index = 0;
int unmatch_index = 0; //
int recalcu = 1;
int chunk_id;
int length = chunk_size;
int j_match_num = 0;
for(int i=0;i<recalcu_region_size;++i){
j_match_num += stat[j+i];
}
int j_match_begin = chunk_num*j;
char chunk_head_value;
uint32 s1, s2, rc;
while(1){
if(recalcu == 1) rc = get_checksum1(&h_file[jump_pos], length, (int*)&s1, (int*)&s2);
else if(recalcu == 0){
s1 -= chunk_head_value + CHAR_OFFSET;
s2 -= chunk_size * (chunk_head_value + CHAR_OFFSET);
s1 += h_file[jump_pos+length-1] + CHAR_OFFSET;
s2 += s1;
rc = (s1 & 0xffff) + (s2 << 16);
}
while(jump_pos > match_offset[j_match_begin+match_index+unmatch_index]){
if(match_chunkid[j_match_begin+match_index+unmatch_index] == -1){
unmatch_index += 1;
}
else if(match_index < j_match_num){
match_chunkid[j_match_begin+match_index+unmatch_index] = -1;
//stat[j]--;
match_index++;
}
else break;
}
if(jump_pos == match_offset[j_match_begin+match_index+unmatch_index] && match_chunkid[j_match_begin+match_index+unmatch_index] != -1) break;
Node *np = lookup_ht(ht, rc, &chunk_id);
if(np == NULL){
recalcu = 0;
jump_pos += 1;
}
else{
uint8_t sum2[8];
get_checksum2((uint8_t*)&h_file[jump_pos], (size_t)length, (uint8_t*)sum2);
while(1){
//if(memcmp(sum2,np->md5,8)==0){
if(char_compare(sum2,np->md5)){
for(int k=j_match_begin;k<j_match_begin+chunk_num*recalcu_region_size;++k){
//the slot is already -1, or not yet -1 but about to be overwritten
if(match_chunkid[k]==-1 || jump_pos+chunk_size > match_offset[k]){
match_offset[k] = jump_pos;
match_chunkid[k] = chunk_id;
//stat[j]++;
break;
}
else{
printf("error in 324 in new_file.cu\n");
}
}
jump_pos += chunk_size;
recalcu = 1;
break;
}
else{
np = np->next;
if(np == NULL){
chunk_head_value = h_file[jump_pos];
jump_pos += 1;
recalcu = 0;
break;
}
}
}
}
//it is also possible that the whole chunk_size*chunk_num region has no match at all
if(match_index >= j_match_num) break;
//printf("match_index is %d, j_match_num is %d\n",match_index, j_match_num);
}
}
Node *lookup_ht(Node *ht, int32 rc, int *chunk_id){
uint index = hash(rc);
Node n = ht[index];
if(n.chunk_id == -1){
return NULL;
}
else{
Node *np = &n;
for(; np != NULL; np=np->next){
if(rc == np->checksum){
*chunk_id = np->chunk_id;
return np;
}
}
return NULL;
}
}
__device__ void d_get_checksum2(const uint8_t *in, const size_t inlen, uint8_t *out){
uint64_t v0 = 0x736f6d6570736575ULL;
uint64_t v1 = 0x646f72616e646f6dULL;
uint64_t v2 = 0x6c7967656e657261ULL;
uint64_t v3 = 0x7465646279746573ULL;
//uint64_t k0 = 50462976;
//uint64_t k1 = 185207048;
uint64_t k0 = U8TO64_LE(k);
uint64_t k1 = U8TO64_LE(k + 8);
uint64_t m;
const uint8_t *end = in + inlen - (inlen % sizeof(uint64_t));
const int left = inlen & 7;
uint64_t b = ((uint64_t)inlen) << 56;
v3 ^= k1;
v2 ^= k0;
v1 ^= k1;
v0 ^= k0;
for (; in != end; in += 8) {
m = U8TO64_LE(in);
v3 ^= m;
SIPROUND;
SIPROUND;
v0 ^= m;
}
switch (left) {
case 7:
b |= ((uint64_t)in[6]) << 48;
case 6:
b |= ((uint64_t)in[5]) << 40;
case 5:
b |= ((uint64_t)in[4]) << 32;
case 4:
b |= ((uint64_t)in[3]) << 24;
case 3:
b |= ((uint64_t)in[2]) << 16;
case 2:
b |= ((uint64_t)in[1]) << 8;
case 1:
b |= ((uint64_t)in[0]);
break;
case 0:
break;
}
v3 ^= b;
SIPROUND;
SIPROUND;
v0 ^= b;
v2 ^= 0xff;
SIPROUND;
SIPROUND;
SIPROUND;
SIPROUND;
b = v0 ^ v1 ^ v2 ^ v3;
U64TO8_LE(out, b);
}
__device__ uint32 d_get_checksum1(char *buf1, int32 len, uint32 *d_s1, uint32 *d_s2)
{
int32 i;
uint32 s1, s2;
char *buf = (char *)buf1;
s1 = s2 = 0;
for (i = 0; i < (len-4); i+=4) {
s2 += 4*(s1 + buf[i]) + 3*buf[i+1] + 2*buf[i+2] + buf[i+3] +
10*CHAR_OFFSET;
s1 += (buf[i+0] + buf[i+1] + buf[i+2] + buf[i+3] + 4*CHAR_OFFSET);
}
for (; i < len; i++) {
s1 += (buf[i]+CHAR_OFFSET); s2 += s1;
}
*d_s1 = s1;
*d_s2 = s2;
return (s1 & 0xffff) + (s2 << 16);
}
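/*
 * Illustration only (not in the original file): a host-side sanity check of
 * the rolling update used in multiwarp_match and gpu_recalcu above.  It
 * relies on the host get_checksum1 and CHAR_OFFSET from checksum.h, exactly
 * as recalcu() uses them; the function name rolling_checksum_demo is
 * hypothetical.  Sliding the window by one byte only needs the old head
 * removed and the new tail added, instead of a full recomputation.
 */
static void rolling_checksum_demo(char *buf, int win, int steps)
{
    uint32 s1 = 0, s2 = 0;
    uint32 rc = get_checksum1(buf, win, (int *)&s1, (int *)&s2);
    for (int off = 1; off <= steps; ++off) {
        s1 -= buf[off - 1] + CHAR_OFFSET;           // drop the old head byte
        s2 -= win * (buf[off - 1] + CHAR_OFFSET);
        s1 += buf[off + win - 1] + CHAR_OFFSET;     // add the new tail byte
        s2 += s1;
        rc = (s1 & 0xffff) + (s2 << 16);
        uint32 t1 = 0, t2 = 0;                      // recompute from scratch to compare
        uint32 ref = get_checksum1(&buf[off], win, (int *)&t1, (int *)&t2);
        if (rc != ref)
            printf("rolling checksum mismatch at offset %d\n", off);
    }
}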
__device__ uint d_hash(uint32 rc){
uint p = 1867;
return (((rc>>16)& 0xffff ^ ((rc&0xffff) * p)) & 0xffff)%HASHSIZE;
}
__device__ Node* d_lookup_ht(Node *ht, int32 rc, int *chunk_id){
uint index = d_hash(rc);
Node n = ht[index];
if(n.chunk_id == -1){
return NULL;
}
else{
Node *np = &n;
for(; np != NULL; np=np->next){
if(rc == np->checksum){
*chunk_id = np->chunk_id;
return np;
}
}
return NULL;
}
}
|
92858f266ebc73dbf98ef07fdbe43837829d5974.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
#define blockSize 256
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
namespace StreamCompaction {
namespace Naive {
__device__ int threadIndex() {
return (blockIdx.x * blockDim.x) + threadIdx.x;
}
__global__ void kernAdd(int d, int n, int *odata, int *idata) {
int index = threadIndex();
if (index >= n) return;
odata[index] = (index < d ? 0 : idata[index - d]) + idata[index];
}
__global__ void kernShiftRight(int n, int *odata, int *idata) {
int index = threadIndex();
if (index >= n) return;
// shift right by one to make the scan exclusive; element 0 becomes the identity, 0
odata[index] = (index == 0) ? 0 : idata[index - 1];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
int* dev_idata;
int* dev_odata;
hipMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_idata failed!");
hipMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_odata failed!");
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
////////////
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
////////////////
int numBlocks = getNumBlocks(blockSize, n);
for (int d = 1; d < n * 2; d *= 2) {
kernAdd << <numBlocks, blockSize >> >(d, n, dev_odata, dev_idata);
int *swap = dev_idata;
dev_idata = dev_odata;
dev_odata = swap;
}
kernShiftRight << <numBlocks, blockSize >> >(n, dev_odata, dev_idata);
///////////
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("%f\n", milliseconds);
hipEventDestroy(start);
hipEventDestroy(stop);
/////////
hipMemcpy(odata, dev_odata, n* sizeof(int), hipMemcpyDeviceToHost);
odata[0] = 0;
hipFree(dev_idata);
hipFree(dev_odata);
}
}
}
|
92858f266ebc73dbf98ef07fdbe43837829d5974.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
#define blockSize 256
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
namespace StreamCompaction {
namespace Naive {
__device__ int threadIndex() {
return (blockIdx.x * blockDim.x) + threadIdx.x;
}
__global__ void kernAdd(int d, int n, int *odata, int *idata) {
int index = threadIndex();
if (index >= n) return;
odata[index] = (index < d ? 0 : idata[index - d]) + idata[index];
}
__global__ void kernShiftRight(int n, int *odata, int *idata) {
int index = threadIndex();
if (index >= n) return;
// shift right by one to make the scan exclusive; element 0 becomes the identity, 0
odata[index] = (index == 0) ? 0 : idata[index - 1];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
int* dev_idata;
int* dev_odata;
cudaMalloc((void**)&dev_idata, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_idata failed!");
cudaMalloc((void**)&dev_odata, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_odata failed!");
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
////////////
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
////////////////
int numBlocks = getNumBlocks(blockSize, n);
for (int d = 1; d < n * 2; d *= 2) {
kernAdd << <numBlocks, blockSize >> >(d, n, dev_odata, dev_idata);
int *swap = dev_idata;
dev_idata = dev_odata;
dev_odata = swap;
}
kernShiftRight << <numBlocks, blockSize >> >(n, dev_odata, dev_idata);
///////////
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("%f\n", milliseconds);
cudaEventDestroy(start);
cudaEventDestroy(stop);
/////////
cudaMemcpy(odata, dev_odata, n* sizeof(int), cudaMemcpyDeviceToHost);
odata[0] = 0;
cudaFree(dev_idata);
cudaFree(dev_odata);
}
}
}
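/*
 * Illustration only (not in the original file): a minimal host-side check of
 * the exclusive scan defined above against a sequential prefix sum.  The
 * helper name checkNaiveScan is hypothetical.
 */
static bool checkNaiveScan(int n, const int *idata)
{
    int *gpu = new int[n];
    int *cpu = new int[n];
    StreamCompaction::Naive::scan(n, gpu, idata);
    cpu[0] = 0;                                   // exclusive scan: first element is the identity
    for (int i = 1; i < n; ++i)
        cpu[i] = cpu[i - 1] + idata[i - 1];
    bool ok = true;
    for (int i = 0; i < n; ++i)
        ok = ok && (gpu[i] == cpu[i]);
    delete[] gpu;
    delete[] cpu;
    return ok;
}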
|
594d829d76c0e8b5c2c16b1553dbcfe45bda1153.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/native/hip/GridSampler.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <c10/macros/Macros.h>
namespace at { namespace native {
using namespace at::cuda::detail;
using at::native::detail::GridSamplerInterpolation;
using at::native::detail::GridSamplerPadding;
namespace {
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_2d_kernel(
const int nthreads,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_H = input.sizes[2];
int inp_W = input.sizes[3];
int out_H = grid.sizes[1];
int out_W = grid.sizes[2];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sH = input.strides[2];
int inp_sW = input.strides[3];
int grid_sN = grid.strides[0];
int grid_sH = grid.strides[1];
int grid_sW = grid.strides[2];
int grid_sCoor = grid.strides[3];
int out_sN = output.strides[0];
int out_sC = output.strides[1];
int out_sH = output.strides[2];
int out_sW = output.strides[3];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int n = index / (out_H * out_W);
const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners);
iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
int ix_nw = static_cast<int>(::floor(ix));
int iy_nw = static_cast<int>(::floor(iy));
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
// calculate bilinear weighted pixel value and set output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
*out_ptr_NCHW = static_cast<scalar_t>(0);
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se;
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
// assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) {
*out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCHW = static_cast<scalar_t>(0);
}
}
}
}
}
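/*
 * Illustration only (not part of the ATen source): the four bilinear weights
 * computed above are the areas of the cells opposite each corner, so they
 * always sum to 1 for an in-range sample point.  The helper name
 * bilinear_weights_demo is hypothetical; it assumes non-negative ix/iy so the
 * integer cast matches floor.
 */
static inline void bilinear_weights_demo(float ix, float iy, float w_out[4]) {
  int ix_nw = static_cast<int>(ix);   // == floor(ix) for ix >= 0
  int iy_nw = static_cast<int>(iy);
  float dx = ix - ix_nw;
  float dy = iy - iy_nw;
  w_out[0] = (1 - dx) * (1 - dy);     // nw
  w_out[1] = dx * (1 - dy);           // ne
  w_out[2] = (1 - dx) * dy;           // sw
  w_out[3] = dx * dy;                 // se  (the four weights sum to 1)
}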
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_3d_kernel(
const int nthreads,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_D = input.sizes[2];
int inp_H = input.sizes[3];
int inp_W = input.sizes[4];
int out_D = grid.sizes[1];
int out_H = grid.sizes[2];
int out_W = grid.sizes[3];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sD = input.strides[2];
int inp_sH = input.strides[3];
int inp_sW = input.strides[4];
int grid_sN = grid.strides[0];
int grid_sD = grid.strides[1];
int grid_sH = grid.strides[2];
int grid_sW = grid.strides[3];
int grid_sCoor = grid.strides[4];
int out_sN = output.strides[0];
int out_sC = output.strides[1];
int out_sD = output.strides[2];
int out_sH = output.strides[3];
int out_sW = output.strides[4];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int d = (index / (out_H * out_W)) % out_D;
const int n = index / (out_D * out_H * out_W);
const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners);
iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners);
iz = grid_sampler_compute_source_index(iz, inp_D, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we used north-east-south-west
// for 5d, we add top-bottom
int ix_tnw = static_cast<int>(::floor(ix));
int iy_tnw = static_cast<int>(::floor(iy));
int iz_tnw = static_cast<int>(::floor(iz));
int ix_tne = ix_tnw + 1;
int iy_tne = iy_tnw;
int iz_tne = iz_tnw;
int ix_tsw = ix_tnw;
int iy_tsw = iy_tnw + 1;
int iz_tsw = iz_tnw;
int ix_tse = ix_tnw + 1;
int iy_tse = iy_tnw + 1;
int iz_tse = iz_tnw;
int ix_bnw = ix_tnw;
int iy_bnw = iy_tnw;
int iz_bnw = iz_tnw + 1;
int ix_bne = ix_tnw + 1;
int iy_bne = iy_tnw;
int iz_bne = iz_tnw + 1;
int ix_bsw = ix_tnw;
int iy_bsw = iy_tnw + 1;
int iz_bsw = iz_tnw + 1;
int ix_bse = ix_tnw + 1;
int iy_bse = iy_tnw + 1;
int iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
// (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne
// + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse
// + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne
// + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse
*out_ptr_NCDHW = static_cast<scalar_t>(0);
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse;
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
int iz_nearest = static_cast<int>(::round(iz));
// assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCDHW = static_cast<scalar_t>(0);
}
}
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_2d_backward_kernel(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> grad_input, // initialized to zeros
TensorInfo<scalar_t, int> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_H = input.sizes[2];
int inp_W = input.sizes[3];
int out_H = grid.sizes[1];
int out_W = grid.sizes[2];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sH = input.strides[2];
int inp_sW = input.strides[3];
int grid_sN = grid.strides[0];
int grid_sH = grid.strides[1];
int grid_sW = grid.strides[2];
int grid_sCoor = grid.strides[3];
int gOut_sN = grad_output.strides[0];
int gOut_sC = grad_output.strides[1];
int gOut_sH = grad_output.strides[2];
int gOut_sW = grad_output.strides[3];
int gInp_sN = grad_input.strides[0];
int gInp_sC = grad_input.strides[1];
int gInp_sH = grad_input.strides[2];
int gInp_sW = grad_input.strides[3];
int gGrid_sW = grad_grid.strides[2];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int n = index / (out_H * out_W);
const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
// multipliers for gradients on ix and iy
scalar_t gix_mult, giy_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
int ix_nw = static_cast<int>(::floor(ix));
int iy_nw = static_cast<int>(::floor(iy));
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
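// Same opposite-corner areas as the forward pass: nw + ne + sw + se == 1 inside the cell.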
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
// calculate and set grad_input
safe_add_2d(gInp_ptr_NC, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut);
safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut);
safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut);
safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut);
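// safe_add_2d (from GridSampler.cuh) only accumulates when the corner lies inside the
// input; several output locations can map to the same input pixel, so the add is
// expected to be atomic.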
// calculate grad_grid
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW];
gix -= nw_val * (iy_se - iy) * gOut;
giy -= nw_val * (ix_se - ix) * gOut;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW];
gix += ne_val * (iy_sw - iy) * gOut;
giy -= ne_val * (ix - ix_sw) * gOut;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW];
gix -= sw_val * (iy - iy_ne) * gOut;
giy += sw_val * (ix_ne - ix) * gOut;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW];
gix += se_val * (iy - iy_nw) * gOut;
giy += se_val * (ix - ix_nw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = gix_mult * gix;
gGrid_ptr_NHW[1] = giy_mult * giy;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
// assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
for (int c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
// calculate and set grad_input
safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW);
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NHW[1] = static_cast<scalar_t>(0);
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_3d_backward_kernel(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> grad_input, // initialized to zeros
TensorInfo<scalar_t, int> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_D = input.sizes[2];
int inp_H = input.sizes[3];
int inp_W = input.sizes[4];
int out_D = grid.sizes[1];
int out_H = grid.sizes[2];
int out_W = grid.sizes[3];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sD = input.strides[2];
int inp_sH = input.strides[3];
int inp_sW = input.strides[4];
int grid_sN = grid.strides[0];
int grid_sD = grid.strides[1];
int grid_sH = grid.strides[2];
int grid_sW = grid.strides[3];
int grid_sCoor = grid.strides[4];
int gOut_sN = grad_output.strides[0];
int gOut_sC = grad_output.strides[1];
int gOut_sD = grad_output.strides[2];
int gOut_sH = grad_output.strides[3];
int gOut_sW = grad_output.strides[4];
int gInp_sN = grad_input.strides[0];
int gInp_sC = grad_input.strides[1];
int gInp_sD = grad_input.strides[2];
int gInp_sH = grad_input.strides[3];
int gInp_sW = grad_input.strides[4];
int gGrid_sW = grad_grid.strides[3];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int d = (index / (out_H * out_W)) % out_D;
const int n = index / (out_D * out_H * out_W);
const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
// multipliers for gradients on ix, iy, and iz
scalar_t gix_mult, giy_mult, giz_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we use north-east-south-west
// for 5d, we add top-bottom
int ix_tnw = static_cast<int>(::floor(ix));
int iy_tnw = static_cast<int>(::floor(iy));
int iz_tnw = static_cast<int>(::floor(iz));
int ix_tne = ix_tnw + 1;
int iy_tne = iy_tnw;
int iz_tne = iz_tnw;
int ix_tsw = ix_tnw;
int iy_tsw = iy_tnw + 1;
int iz_tsw = iz_tnw;
int ix_tse = ix_tnw + 1;
int iy_tse = iy_tnw + 1;
int iz_tse = iz_tnw;
int ix_bnw = ix_tnw;
int iy_bnw = iy_tnw;
int iz_bnw = iz_tnw + 1;
int ix_bne = ix_tnw + 1;
int iy_bne = iy_tnw;
int iz_bne = iz_tnw + 1;
int ix_bsw = ix_tnw;
int iy_bsw = iy_tnw + 1;
int iz_bsw = iz_tnw + 1;
int ix_bse = ix_tnw + 1;
int iy_bse = iy_tnw + 1;
int iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
// accumulate gradients into grad_input and grad_grid for each channel
for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) {
scalar_t gOut = *gOut_ptr_NCDHW;
// calculate and set grad_input
safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut);
safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut);
safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut);
safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut);
safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut);
safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut);
safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut);
safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut);
// calculate grad_grid
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW];
gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut;
giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut;
giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW];
gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut;
giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut;
giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW];
gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut;
giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut;
giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW];
gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut;
giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut;
giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW];
gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut;
giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut;
giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW];
gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut;
giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut;
giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW];
gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut;
giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut;
giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW];
gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut;
giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut;
giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = gix_mult * gix;
gGrid_ptr_NDHW[1] = giy_mult * giy;
gGrid_ptr_NDHW[2] = giz_mult * giz;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
int iz_nearest = static_cast<int>(::round(iz));
// assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) {
// calculate and set grad_input
safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest,
gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW);
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0);
}
}
}
} // namespace
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid,
int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
auto output = at::empty({N, input.size(1), H, W}, input.options());
int count = static_cast<int>(N * H * W);
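// One thread per output spatial location (channels are looped over inside the kernel);
// GET_BLOCKS rounds count / CUDA_NUM_THREADS up so the whole range is covered.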
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] {
hipLaunchKernelGGL(( grid_sampler_2d_kernel<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return output;
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid,
int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
auto output = at::empty({N, input.size(1), D, H, W}, input.options());
int count = static_cast<int>(N * D * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_cuda", [&] {
hipLaunchKernelGGL(( grid_sampler_3d_kernel<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return output;
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor>
grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input,
const Tensor& grid, int64_t interpolation_mode,
int64_t padding_mode, bool align_corners) {
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
int count = static_cast<int>(N * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] {
hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(grad_input),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return std::make_tuple(grad_input, grad_grid);
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor>
grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input,
const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
int count = static_cast<int>(N * D * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] {
hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(grad_input),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return std::make_tuple(grad_input, grad_grid);
}
}} // namespace at::native
|
594d829d76c0e8b5c2c16b1553dbcfe45bda1153.cu
|
#include <ATen/ATen.h>
#include <ATen/native/cuda/GridSampler.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/macros/Macros.h>
namespace at { namespace native {
using namespace at::cuda::detail;
using at::native::detail::GridSamplerInterpolation;
using at::native::detail::GridSamplerPadding;
namespace {
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_2d_kernel(
const int nthreads,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_H = input.sizes[2];
int inp_W = input.sizes[3];
int out_H = grid.sizes[1];
int out_W = grid.sizes[2];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sH = input.strides[2];
int inp_sW = input.strides[3];
int grid_sN = grid.strides[0];
int grid_sH = grid.strides[1];
int grid_sW = grid.strides[2];
int grid_sCoor = grid.strides[3];
int out_sN = output.strides[0];
int out_sC = output.strides[1];
int out_sH = output.strides[2];
int out_sW = output.strides[3];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int n = index / (out_H * out_W);
const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
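// grid has layout N x H_out x W_out x 2; grid_sCoor steps from the x coordinate
// to the y coordinate of the same sampling location.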
// get the corresponding input x, y co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners);
iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
int ix_nw = static_cast<int>(::floor(ix));
int iy_nw = static_cast<int>(::floor(iy));
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
// calculate bilinear weighted pixel value and set output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
*out_ptr_NCHW = static_cast<scalar_t>(0);
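// Accumulate only the corners that fall inside the input; out-of-range corners
// contribute nothing, which is what implements zero padding for coordinates the
// padding mode leaves outside the image.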
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se;
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
// assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) {
*out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCHW = static_cast<scalar_t>(0);
}
}
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_3d_kernel(
const int nthreads,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_D = input.sizes[2];
int inp_H = input.sizes[3];
int inp_W = input.sizes[4];
int out_D = grid.sizes[1];
int out_H = grid.sizes[2];
int out_W = grid.sizes[3];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sD = input.strides[2];
int inp_sH = input.strides[3];
int inp_sW = input.strides[4];
int grid_sN = grid.strides[0];
int grid_sD = grid.strides[1];
int grid_sH = grid.strides[2];
int grid_sW = grid.strides[3];
int grid_sCoor = grid.strides[4];
int out_sN = output.strides[0];
int out_sC = output.strides[1];
int out_sD = output.strides[2];
int out_sH = output.strides[3];
int out_sW = output.strides[4];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int d = (index / (out_H * out_W)) % out_D;
const int n = index / (out_D * out_H * out_W);
const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners);
iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners);
iz = grid_sampler_compute_source_index(iz, inp_D, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we use north-east-south-west
// for 5d, we add top-bottom
int ix_tnw = static_cast<int>(::floor(ix));
int iy_tnw = static_cast<int>(::floor(iy));
int iz_tnw = static_cast<int>(::floor(iz));
int ix_tne = ix_tnw + 1;
int iy_tne = iy_tnw;
int iz_tne = iz_tnw;
int ix_tsw = ix_tnw;
int iy_tsw = iy_tnw + 1;
int iz_tsw = iz_tnw;
int ix_tse = ix_tnw + 1;
int iy_tse = iy_tnw + 1;
int iz_tse = iz_tnw;
int ix_bnw = ix_tnw;
int iy_bnw = iy_tnw;
int iz_bnw = iz_tnw + 1;
int ix_bne = ix_tnw + 1;
int iy_bne = iy_tnw;
int iz_bne = iz_tnw + 1;
int ix_bsw = ix_tnw;
int iy_bsw = iy_tnw + 1;
int iz_bsw = iz_tnw + 1;
int ix_bse = ix_tnw + 1;
int iy_bse = iy_tnw + 1;
int iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
// (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne
// + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse
// + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne
// + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse
*out_ptr_NCDHW = static_cast<scalar_t>(0);
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse;
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
int iz_nearest = static_cast<int>(::round(iz));
// assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCDHW = static_cast<scalar_t>(0);
}
}
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_2d_backward_kernel(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> grad_input, // initialized to zeros
TensorInfo<scalar_t, int> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_H = input.sizes[2];
int inp_W = input.sizes[3];
int out_H = grid.sizes[1];
int out_W = grid.sizes[2];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sH = input.strides[2];
int inp_sW = input.strides[3];
int grid_sN = grid.strides[0];
int grid_sH = grid.strides[1];
int grid_sW = grid.strides[2];
int grid_sCoor = grid.strides[3];
int gOut_sN = grad_output.strides[0];
int gOut_sC = grad_output.strides[1];
int gOut_sH = grad_output.strides[2];
int gOut_sW = grad_output.strides[3];
int gInp_sN = grad_input.strides[0];
int gInp_sC = grad_input.strides[1];
int gInp_sH = grad_input.strides[2];
int gInp_sW = grad_input.strides[3];
int gGrid_sW = grad_grid.strides[2];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int n = index / (out_H * out_W);
const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
// multipliers for gradients on ix and iy
scalar_t gix_mult, giy_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
int ix_nw = static_cast<int>(::floor(ix));
int iy_nw = static_cast<int>(::floor(iy));
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
// calculate and set grad_input
safe_add_2d(gInp_ptr_NC, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut);
safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut);
safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut);
safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut);
// calculate grad_grid
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW];
gix -= nw_val * (iy_se - iy) * gOut;
giy -= nw_val * (ix_se - ix) * gOut;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW];
gix += ne_val * (iy_sw - iy) * gOut;
giy -= ne_val * (ix - ix_sw) * gOut;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW];
gix -= sw_val * (iy - iy_ne) * gOut;
giy += sw_val * (ix_ne - ix) * gOut;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW];
gix += se_val * (iy - iy_nw) * gOut;
giy += se_val * (ix - ix_nw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = gix_mult * gix;
gGrid_ptr_NHW[1] = giy_mult * giy;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
// assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
for (int c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
// calculate and set grad_input
safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW);
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NHW[1] = static_cast<scalar_t>(0);
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_3d_backward_kernel(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> grad_input, // initialized to zeros
TensorInfo<scalar_t, int> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_D = input.sizes[2];
int inp_H = input.sizes[3];
int inp_W = input.sizes[4];
int out_D = grid.sizes[1];
int out_H = grid.sizes[2];
int out_W = grid.sizes[3];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sD = input.strides[2];
int inp_sH = input.strides[3];
int inp_sW = input.strides[4];
int grid_sN = grid.strides[0];
int grid_sD = grid.strides[1];
int grid_sH = grid.strides[2];
int grid_sW = grid.strides[3];
int grid_sCoor = grid.strides[4];
int gOut_sN = grad_output.strides[0];
int gOut_sC = grad_output.strides[1];
int gOut_sD = grad_output.strides[2];
int gOut_sH = grad_output.strides[3];
int gOut_sW = grad_output.strides[4];
int gInp_sN = grad_input.strides[0];
int gInp_sC = grad_input.strides[1];
int gInp_sD = grad_input.strides[2];
int gInp_sH = grad_input.strides[3];
int gInp_sW = grad_input.strides[4];
int gGrid_sW = grad_grid.strides[3];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int d = (index / (out_H * out_W)) % out_D;
const int n = index / (out_D * out_H * out_W);
const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
// multipliers for gradients on ix, iy, and iz
scalar_t gix_mult, giy_mult, giz_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we use north-east-south-west
// for 5d, we add top-bottom
int ix_tnw = static_cast<int>(::floor(ix));
int iy_tnw = static_cast<int>(::floor(iy));
int iz_tnw = static_cast<int>(::floor(iz));
int ix_tne = ix_tnw + 1;
int iy_tne = iy_tnw;
int iz_tne = iz_tnw;
int ix_tsw = ix_tnw;
int iy_tsw = iy_tnw + 1;
int iz_tsw = iz_tnw;
int ix_tse = ix_tnw + 1;
int iy_tse = iy_tnw + 1;
int iz_tse = iz_tnw;
int ix_bnw = ix_tnw;
int iy_bnw = iy_tnw;
int iz_bnw = iz_tnw + 1;
int ix_bne = ix_tnw + 1;
int iy_bne = iy_tnw;
int iz_bne = iz_tnw + 1;
int ix_bsw = ix_tnw;
int iy_bsw = iy_tnw + 1;
int iz_bsw = iz_tnw + 1;
int ix_bse = ix_tnw + 1;
int iy_bse = iy_tnw + 1;
int iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
// accumulate gradients into grad_input and grad_grid for each channel
for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) {
scalar_t gOut = *gOut_ptr_NCDHW;
// calculate and set grad_input
safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut);
safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut);
safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut);
safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut);
safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut);
safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut);
safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut);
safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut);
// calculate grad_grid
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW];
gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut;
giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut;
giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW];
gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut;
giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut;
giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW];
gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut;
giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut;
giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW];
gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut;
giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut;
giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW];
gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut;
giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut;
giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW];
gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut;
giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut;
giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW];
gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut;
giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut;
giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW];
gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut;
giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut;
giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = gix_mult * gix;
gGrid_ptr_NDHW[1] = giy_mult * giy;
gGrid_ptr_NDHW[2] = giz_mult * giz;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
int iz_nearest = static_cast<int>(::round(iz));
// assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) {
// calculate and set grad_input
safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest,
gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW);
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0);
}
}
}
} // namespace
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
Tensor grid_sampler_2d_cuda(const Tensor& input, const Tensor& grid,
int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
auto output = at::empty({N, input.size(1), H, W}, input.options());
int count = static_cast<int>(N * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] {
grid_sampler_2d_kernel<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return output;
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
Tensor grid_sampler_3d_cuda(const Tensor& input, const Tensor& grid,
int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
auto output = at::empty({N, input.size(1), D, H, W}, input.options());
int count = static_cast<int>(N * D * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_cuda", [&] {
grid_sampler_3d_kernel<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return output;
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor>
grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input,
const Tensor& grid, int64_t interpolation_mode,
int64_t padding_mode, bool align_corners) {
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
int count = static_cast<int>(N * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] {
grid_sampler_2d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(grad_input),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return std::make_tuple(grad_input, grad_grid);
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor>
grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input,
const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
auto grad_input = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
int count = static_cast<int>(N * D * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] {
grid_sampler_3d_backward_kernel<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(grad_input),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return std::make_tuple(grad_input, grad_grid);
}
}} // namespace at::native
|
7ffa94ed852784cf7632348ba1ecd3893a3a1184.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <time.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#define STOP 0
#define START 1
#define BLOCKSIZE 256
extern "C" void chrono (int kind, float *time);
__global__ void kconvol (float *gpu_a, float *gpu_b, int n) {
int i, j, l;
// TO DO : evaluate the global 1D index l of the current thread,
// using blockDim, blockIdx and threadIdx.
l = threadIdx.x + blockIdx.x * blockDim.x;
int bIdx = (blockIdx.x);
int cDim = (blockDim.x);
int TIdx = (threadIdx.x);
// TO DO : evaluate global indices of thread (i,j) from the index l
j = l % n;
i = l / n;
//printf("%d\n", l);
//printf("---------\nl = %d, (i,j) = (%d,%d)\nblockIdx: %d\nblockDim: %d\nthreadIdx: %d\n", l, i, j, bIdx, cDim, TIdx);
if ((i >= n) || (j >= n)) return;
if ((i == 0) || (j == 0) || (i == n-1) || (j == n-1)) {
gpu_b[l] = gpu_a[l]; // edges are untouched
}
else
// TO DO : fill up the MISSING indices below
gpu_b[l]=(1./5.)*(gpu_a[l-n] + gpu_a[l-1] + gpu_a[l] + gpu_a[l+1]+ gpu_a[l+n]);
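// The else branch is a 5-point stencil: the average of the cell and its four
// N/S/E/W neighbours, where offsets -n and +n move one row in the flattened n x n array.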
}
extern "C" void gpu_convol (float *a, float *b, int n, int blocks) {
float *gpu_a;
float *gpu_b;
hipError_t err;
float time;
err = hipMalloc (&gpu_a, n*n*sizeof(float));
if (err != 0) {
printf ("Error allocating gpu_a: %s\n", hipGetErrorString (err));
exit (1);
}
err = hipMalloc (&gpu_b, n*n*sizeof(float));
if (err != 0) {
printf ("Error allocating gpu_b: %s\n", hipGetErrorString (err));
exit (1);
}
hipMemcpy (gpu_a, a, n*n*sizeof(float), hipMemcpyHostToDevice);
// NOTE : the chronometer below does not include the overhead of memory allocation and
// memory transfer.
chrono (START, &time);
// TO DO : the number of blocks is missing below in the kernel invocation
//int blocks = (1000192) / BLOCKSIZE;
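// A launch that covers all n*n cells would use blocks = (n*n + BLOCKSIZE - 1) / BLOCKSIZE;
// here `blocks` is passed in by the caller so the timing can be recorded per block count
// (see the fprintf below).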
printf("block: %d\n", blocks);
printf("blocksize: %d\n", BLOCKSIZE);
hipLaunchKernelGGL(( kconvol) , dim3(blocks),dim3(BLOCKSIZE), 0, 0, gpu_a, gpu_b, n);
err=hipDeviceSynchronize ();
chrono (STOP, &time);
printf ("Convolution took %f sec. on GPU\n", time);
hipMemcpy (b, gpu_b, n*n*sizeof(float), hipMemcpyDeviceToHost);
if (err != 0) {
printf ("%s\n", hipGetErrorString (err));
exit (1);
}
FILE *fp;
fp = fopen("timing_plot_1000x1000.out", "a");
fprintf(fp, "%d, %.10g\n", blocks, time);
hipFree (gpu_a);
hipFree (gpu_b);
}
|
7ffa94ed852784cf7632348ba1ecd3893a3a1184.cu
|
#include <time.h>
#include <cuda.h>
#include <stdio.h>
#define STOP 0
#define START 1
#define BLOCKSIZE 256
extern "C" void chrono (int kind, float *time);
__global__ void kconvol (float *gpu_a, float *gpu_b, int n) {
int i, j, l;
// TO DO : evaluate the global 1D index l of the current thread,
// using blockDim, blockIdx and threadIdx.
l = threadIdx.x + blockIdx.x * blockDim.x;
int bIdx = (blockIdx.x);
int cDim = (blockDim.x);
int TIdx = (threadIdx.x);
// TO DO : evaluate global indices of thread (i,j) from the index l
j = l % n;
i = l / n;
//printf("%d\n", l);
//printf("---------\nl = %d, (i,j) = (%d,%d)\nblockIdx: %d\nblockDim: %d\nthreadIdx: %d\n", l, i, j, bIdx, cDim, TIdx);
if ((i >= n) || (j >= n)) return;
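// Guard: threads in the last block may land past the n x n domain when
// blocks*BLOCKSIZE exceeds n*n, so they simply return.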
if ((i == 0) || (j == 0) || (i == n-1) || (j == n-1)) {
gpu_b[l] = gpu_a[l]; // edges are untouched
}
else
// TO DO : fill up the MISSING indices below
gpu_b[l]=(1./5.)*(gpu_a[l-n] + gpu_a[l-1] + gpu_a[l] + gpu_a[l+1]+ gpu_a[l+n]);
}
extern "C" void gpu_convol (float *a, float *b, int n, int blocks) {
float *gpu_a;
float *gpu_b;
cudaError_t err;
float time;
err = cudaMalloc (&gpu_a, n*n*sizeof(float));
if (err != 0) {
printf ("Error allocating gpu_a: %s\n", cudaGetErrorString (err));
exit (1);
}
err = cudaMalloc (&gpu_b, n*n*sizeof(float));
if (err != 0) {
printf ("Error allocating gpu_b: %s\n", cudaGetErrorString (err));
exit (1);
}
cudaMemcpy (gpu_a, a, n*n*sizeof(float), cudaMemcpyHostToDevice);
// NOTE : the chronometer below does not include the overhead of memory allocation and
// memory transfer.
chrono (START, &time);
// TO DO : the number of blocks is missing below in the kernel invocation
//int blocks = (1000192) / BLOCKSIZE;
printf("block: %d\n", blocks);
printf("blocksize: %d\n", BLOCKSIZE);
kconvol <<<blocks,BLOCKSIZE>>> (gpu_a, gpu_b, n);
err=cudaDeviceSynchronize ();
chrono (STOP, &time);
printf ("Convolution took %f sec. on GPU\n", time);
cudaMemcpy (b, gpu_b, n*n*sizeof(float), cudaMemcpyDeviceToHost);
if (err != 0) {
printf ("%s\n", cudaGetErrorString (err));
exit (1);
}
FILE *fp;
fp = fopen("timing_plot_1000x1000.out", "a");
fprintf(fp, "%d, %.10g\n", blocks, time);
cudaFree (gpu_a);
cudaFree (gpu_b);
}
|
e1de8949a17ef7ddf67eed2c570261c6c4086248.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <helper_cuda.h>
#include <extern/cuda/matrix.h>
#include <geometry/geometry_helper.h>
#include <device_launch_parameters.h>
#include <extern/cuda/helper_cuda.h>
#include "core/params.h"
#include "preprocess.h"
#include "util/timer.h"
__global__
void ResetInlierRatioKernel(
float *inlier_ratio,
int width,
int height
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int idx = y * width + x;
/// Reset the per-pixel inlier ratio to its initial value
inlier_ratio[idx] = 0.1f;
}
__global__
void ConvertDepthFormatKernel(
float *dst, short *src,
uint width, uint height,
float range_factor,
float min_depth_range,
float max_depth_range
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int idx = y * width + x;
/// Convert mm -> m
const float depth = range_factor * src[idx];
bool is_valid = (depth >= min_depth_range && depth <= max_depth_range);
dst[idx] = is_valid ? depth : MINF;
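// Readings outside [min_depth_range, max_depth_range] are replaced with the MINF
// sentinel so downstream kernels can skip them.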
}
__global__
void ConvertColorFormatKernel(float4 *dst, uchar4 *src,
uint width, uint height)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int idx = y * width + x;
uchar4 c = src[idx];
bool is_valid = (c.x != 0 && c.y != 0 && c.z != 0);
dst[idx] = is_valid ? make_float4(c.z / 255.0f, c.y / 255.0f,
c.x / 255.0f, c.w / 255.0f)
: make_float4(MINF, MINF, MINF, MINF);
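// The uchar4 input follows OpenCV's BGRA byte order, so channels are swizzled to RGBA
// and scaled to [0, 1]; a pixel with any zero colour channel is treated as invalid.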
}
__device__
size_t GetArrayIndex(int x, int y, int width)
{
return static_cast<size_t>(y * width + x);
}
__global__
void NormalizeNormalsKernel(float4 *normal, uint width)
{
const int ux = blockIdx.x * blockDim.x + threadIdx.x;
const int uy = blockIdx.y * blockDim.y + threadIdx.y;
const size_t idx = GetArrayIndex(ux, uy, width);
normal[idx] = make_float4(normalize(make_float3(normal[idx])), 1.0f);
}
/**
* Implementation from BundleFusion
* Copyright (c) 2017 by Angela Dai and Matthias Niessner
*/
inline __device__ float gaussD(float factor, int x, int y)
{
return exp(-((x * x + y * y) * factor));
}
/**
* Implementation from BundleFusion
* Copyright (c) 2017 by Angela Dai and Matthias Niessner
*/
inline __device__ float gaussR(float factor, float dist)
{
return exp(-(dist * dist) * factor);
}
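// gaussD and gaussR are the spatial and range kernels of the bilateral filter;
// `factor` is expected to be 1 / (2 * sigma^2) so the exponent matches exp(-d^2 / (2*sigma^2)).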
/**
* Implementation from BundleFusion
* Copyright (c) 2017 by Angela Dai and Matthias Niessner
*
* @param input
* @param output
* @param sigma_d
* @param sigma_r
*/
__global__
void BilateralFilterKernel(float4 *input, float4 *output, float sigma_d, float sigma_r, uint width, uint height)
{
const int ux = blockIdx.x * blockDim.x + threadIdx.x;
const int uy = blockIdx.y * blockDim.y + threadIdx.y;
if (ux >= width or uy >= height)
return;
const uint idx = uy * width + ux;
output[idx] = make_float4(MINF);
const float4 center = input[idx];
if (center.x == MINF or center.y == MINF or center.z == MINF or center.w == MINF)
return;
float4 sum = make_float4(0.0f);
float sum_weight = 0.0f;
float sigma_d_factor = 1 / (2.0f * sigma_d * sigma_d);
float sigma_r_factor = 1 / (2.0f * sigma_r * sigma_r);
const uint radius = (uint) ceil(sigma_d);
for (int i = ux - radius; i <= ux + radius; i++)
{
for (int j = uy - radius; j <= uy + radius; j++)
{
if (i < 0 or j < 0 or i >= width or j >= height)
continue;
const float4 value = input[j * width + i];
if (value.x == MINF or value.y == MINF or value.z == MINF or value.w == MINF)
continue;
const float weight = gaussD(sigma_d_factor, i - ux, j - uy) * gaussR(sigma_r_factor, length(value - center));
sum += weight * value;
sum_weight += weight;
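// weight = spatial Gaussian (pixel distance) * range Gaussian (value difference),
// so neighbours that are far away or very different from the centre contribute little.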
}
}
if (sum_weight >= 0.0f)
{
output[idx] = sum / sum_weight;
}
}
/**
* Implementation from BundleFusion
* Copyright (c) 2017 by Angela Dai and Matthias Niessner
*
* @param input
* @param output
* @param sigma_d
* @param sigma_r
*/
__global__
void BilateralFilterKernelFloat(float *input, float *output, float sigma_d, float sigma_r, uint width, uint height)
{
const int ux = blockIdx.x * blockDim.x + threadIdx.x;
const int uy = blockIdx.y * blockDim.y + threadIdx.y;
if (ux >= width or uy >= height)
return;
const uint idx = uy * width + ux;
output[idx] = MINF;
const float center = input[idx];
if (center == MINF)
return;
float sum = 0.0f;
float sum_weight = 0.0f;
const uint radius = (uint) ceil(2.0 * sigma_d);
for (int i = ux - radius; i <= ux + radius; i++)
{
for (int j = uy - radius; j <= uy + radius; j++)
{
if (i < 0 or j < 0 or i >= width or j >= height)
continue;
const float value = input[j * width + i];
if (value == MINF)
continue;
const float weight = gaussD(sigma_d, i - ux, j - uy) * gaussR(sigma_r, abs(value - center));
sum += weight * value;
sum_weight += weight;
}
}
if (sum_weight >= 0.0f)
{
output[idx] = sum / sum_weight;
}
}
__global__
void ComputeNormalMapKernel(float4 *normal, float *depth,
uint width, uint height,
float fx, float fy, float cx, float cy)
{
const int ux = blockIdx.x * blockDim.x + threadIdx.x;
const int uy = blockIdx.y * blockDim.y + threadIdx.y;
const size_t idx = GetArrayIndex(ux, uy, width);
if (ux < 1 or uy < 1 or ux + 1 >= width or uy + 1 >= height)
{
if (ux == 1 or uy == 1 or ux + 1 == width or uy + 1 == height)
normal[idx] = make_float4(0);
return;
}
float3 normal_ = make_float3(0);
float3 center = GeometryHelper::ImageReprojectToCamera(ux, uy, depth[idx], fx, fy, cx, cy);
float3 neighbors[8];
size_t count = 0;
static const int2 coords[8] = {{-1, -1},
{0, -1},
{1, -1},
{1, 0},
{1, 1},
{0, 1},
{-1, 1},
{-1, 0}};
for (int i = 0; i < 8; i++)
{
int u = ux + coords[i].x;
int v = uy + coords[i].y;
float depth_value = depth[GetArrayIndex(u, v, width)];
if (depth_value == 0.0f or depth_value == MINF)
{
normal[idx] = make_float4(0);
return;
}
neighbors[count] = GeometryHelper::ImageReprojectToCamera(u, v, depth_value, fx, fy, cx, cy);
count++;
}
for (int i = 0; i < count; i++)
{
float3 n = normalize(cross(neighbors[i] - center, neighbors[(i + 1) % 4] - center));
if (n.z > 0) // This is an outlier case caused by faulty depth data!
continue;
normal_ += n;
}
normal[idx] = make_float4(normalize(make_float3(normal_.x, normal_.y, normal_.z)), 0.0f);
}
//////////
/// Member function: (CPU calling GPU kernels)
__host__
void ResetInlierRatio(
float *inlier_ratio,
SensorParams ¶ms
)
{
uint width = params.width;
uint height = params.height;
const uint threads_per_block = 16;
const dim3 grid_size((width + threads_per_block - 1) / threads_per_block,
(height + threads_per_block - 1) / threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
ResetInlierRatioKernel << < grid_size, block_size >> > (
inlier_ratio, width, height);
}
__host__
void ConvertDepthFormat(
cv::Mat &depth_img,
short *depth_buffer,
float *depth_data,
SensorParams ¶ms
)
{
/// First copy the CPU depth data into the device-side short buffer
uint width = params.width;
uint height = params.height;
uint image_size = width * height;
checkCudaErrors(hipMemcpy(depth_buffer, (short *) depth_img.data,
sizeof(short) * image_size,
hipMemcpyHostToDevice));
const uint threads_per_block = 16;
const dim3 grid_size((width + threads_per_block - 1) / threads_per_block,
(height + threads_per_block - 1) / threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
ConvertDepthFormatKernel << < grid_size, block_size >> > (
depth_data,
depth_buffer,
width, height,
params.range_factor,
params.min_depth_range,
params.max_depth_range);
// float *depth_tmp;
// checkCudaErrors(hipMalloc(&depth_tmp, sizeof(float) * width * height));
// ConvertDepthFormatKernel << < grid_size, block_size >> > (
// depth_tmp,
// depth_buffer,
// width, height,
// params.range_factor,
// params.min_depth_range,
// params.max_depth_range);
// BilateralFilterKernelFloat << < grid_size, block_size >> > (
// depth_tmp,
// depth_data,
// 5,
// 5,
// width,
// height
// );
// checkCudaErrors(hipFree(depth_tmp));
}
__host__
void ConvertColorFormat(
cv::Mat &color_img,
uchar4 *color_buffer,
float4 *color_data,
SensorParams ¶ms
)
{
uint width = params.width;
uint height = params.height;
uint image_size = width * height;
checkCudaErrors(hipMemcpy(color_buffer, color_img.data,
sizeof(uchar4) * image_size,
hipMemcpyHostToDevice));
const int threads_per_block = 16;
const dim3 grid_size((width + threads_per_block - 1) / threads_per_block,
(height + threads_per_block - 1) / threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
ConvertColorFormatKernel << < grid_size, block_size >> > (
color_data,
color_buffer,
width,
height);
}
__host__
void ComputeNormalMap(
float *depth_data,
float4 *normal_data,
SensorParams ¶ms,
double &normal_estimation_time,
double &bilateral_filter_time
)
{
Timer timer;
uint width = params.width;
uint height = params.height;
const int threads_per_block = 16;
const dim3 grid_size((width + threads_per_block - 1) / threads_per_block,
(height + threads_per_block - 1) / threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
// Filter depth image BEFORE normal estimation
// cv::cuda::GpuMat depth_img(height, width, CV_32FC1, depth_data);
// cv::cuda::GpuMat depth_img_filtered;
// cv::cuda::bilateralFilter(depth_img, depth_img_filtered, -1, 5, 5, cv::BORDER_DEFAULT);
timer.Tick();
float4 *normals_tmp;
checkCudaErrors(hipMalloc(&normals_tmp, sizeof(float4) * width * height));
ComputeNormalMapKernel << < grid_size, block_size >> > (
// normal_data,
normals_tmp,
// reinterpret_cast<float *>(depth_img_filtered.data),
depth_data,
width,
height,
params.fx, params.fy, params.cx, params.cy
);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
normal_estimation_time = timer.Tock();
timer.Tick();
BilateralFilterKernel << < grid_size, block_size >> > (
normals_tmp,
normal_data,
5,
5,
width,
height
);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipGetLastError());
bilateral_filter_time = timer.Tock();
checkCudaErrors(hipFree(normals_tmp));
// Filter normal data AFTER normal estimation
// cv::cuda::GpuMat normal_map(height, width, CV_32FC4, normal_data);
// cv::cuda::GpuMat normal_map_filtered;
// cv::cuda::bilateralFilter(normal_map, normal_map_filtered, -1, 5, 5, cv::BORDER_DEFAULT);
// checkCudaErrors(hipMemcpy(normal_data, normal_map_filtered.data,
// sizeof(float4) * height * width,
// hipMemcpyDeviceToDevice));
NormalizeNormalsKernel << < grid_size, block_size >> > (normal_data, width);
}
|
e1de8949a17ef7ddf67eed2c570261c6c4086248.cu
|
#include <opencv2/opencv.hpp>
#include <helper_cuda.h>
#include <extern/cuda/matrix.h>
#include <geometry/geometry_helper.h>
#include <device_launch_parameters.h>
#include <extern/cuda/helper_cuda.h>
#include "core/params.h"
#include "preprocess.h"
#include "util/timer.h"
__global__
void ResetInlierRatioKernel(
float *inlier_ratio,
int width,
int height
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int idx = y * width + x;
/// Initialize the inlier ratio with a default value
inlier_ratio[idx] = 0.1f;
}
__global__
void ConvertDepthFormatKernel(
float *dst, short *src,
uint width, uint height,
float range_factor,
float min_depth_range,
float max_depth_range
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int idx = y * width + x;
/// Convert mm -> m
const float depth = range_factor * src[idx];
bool is_valid = (depth >= min_depth_range && depth <= max_depth_range);
dst[idx] = is_valid ? depth : MINF;
}
__global__
void ConvertColorFormatKernel(float4 *dst, uchar4 *src,
uint width, uint height)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int idx = y * width + x;
uchar4 c = src[idx];
bool is_valid = (c.x != 0 && c.y != 0 && c.z != 0);
dst[idx] = is_valid ? make_float4(c.z / 255.0f, c.y / 255.0f,
c.x / 255.0f, c.w / 255.0f)
: make_float4(MINF, MINF, MINF, MINF);
}
__device__
size_t GetArrayIndex(int x, int y, int width)
{
return static_cast<size_t>(y * width + x);
}
__global__
void NormalizeNormalsKernel(float4 *normal, uint width)
{
const int ux = blockIdx.x * blockDim.x + threadIdx.x;
const int uy = blockIdx.y * blockDim.y + threadIdx.y;
const size_t idx = GetArrayIndex(ux, uy, width);
normal[idx] = make_float4(normalize(make_float3(normal[idx])), 1.0f);
}
/**
* Implementation from BundleFusion
* Copyright (c) 2017 by Angela Dai and Matthias Niessner
*/
inline __device__ float gaussD(float factor, int x, int y)
{
return exp(-((x * x + y * y) * factor));
}
/**
* Implementation from BundleFusion
* Copyright (c) 2017 by Angela Dai and Matthias Niessner
*/
inline __device__ float gaussR(float factor, float dist)
{
return exp(-(dist * dist) * factor);
}
/**
* Implementation from BundleFusion
* Copyright (c) 2017 by Angela Dai and Matthias Niessner
*
* @param input
* @param output
* @param sigma_d
* @param sigma_r
*/
__global__
void BilateralFilterKernel(float4 *input, float4 *output, float sigma_d, float sigma_r, uint width, uint height)
{
const int ux = blockIdx.x * blockDim.x + threadIdx.x;
const int uy = blockIdx.y * blockDim.y + threadIdx.y;
if (ux >= width or uy >= height)
return;
const uint idx = uy * width + ux;
output[idx] = make_float4(MINF);
const float4 center = input[idx];
if (center.x == MINF or center.y == MINF or center.z == MINF or center.w == MINF)
return;
float4 sum = make_float4(0.0f);
float sum_weight = 0.0f;
float sigma_d_factor = 1 / (2.0f * sigma_d * sigma_d);
float sigma_r_factor = 1 / (2.0f * sigma_r * sigma_r);
const uint radius = (uint) ceil(sigma_d);
for (int i = ux - radius; i <= ux + radius; i++)
{
for (int j = uy - radius; j <= uy + radius; j++)
{
if (i < 0 or j < 0 or i >= width or j >= height)
continue;
const float4 value = input[j * width + i];
if (value.x == MINF or value.y == MINF or value.z == MINF or value.w == MINF)
continue;
const float weight = gaussD(sigma_d_factor, i - ux, j - uy) * gaussR(sigma_r_factor, length(value - center));
sum += weight * value;
sum_weight += weight;
}
}
if (sum_weight >= 0.0f)
{
output[idx] = sum / sum_weight;
}
}
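/**
 * Editor's note (illustrative, not from the original source): for a valid
 * neighbour value v at pixel offset (du, dv) from the centre value c, the
 * weight accumulated above works out to
 *   w = exp(-(du*du + dv*dv) / (2 * sigma_d^2)) * exp(-||v - c||^2 / (2 * sigma_r^2)),
 * because sigma_d_factor and sigma_r_factor are precomputed as 1 / (2 * sigma^2)
 * before the loop.
 */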
/**
* Implementation from BundleFusion
* Copyright (c) 2017 by Angela Dai and Matthias Niessner
*
* @param input
* @param output
* @param sigma_d
* @param sigma_r
*/
__global__
void BilateralFilterKernelFloat(float *input, float *output, float sigma_d, float sigma_r, uint width, uint height)
{
const int ux = blockIdx.x * blockDim.x + threadIdx.x;
const int uy = blockIdx.y * blockDim.y + threadIdx.y;
if (ux >= width or uy >= height)
return;
const uint idx = uy * width + ux;
output[idx] = MINF;
const float center = input[idx];
if (center == MINF)
return;
float sum = 0.0f;
float sum_weight = 0.0f;
const uint radius = (uint) ceil(2.0 * sigma_d);
for (int i = ux - radius; i <= ux + radius; i++)
{
for (int j = uy - radius; j <= uy + radius; j++)
{
if (i < 0 or j < 0 or i >= width or j >= height)
continue;
const float value = input[j * width + i];
if (value == MINF)
continue;
const float weight = gaussD(sigma_d, i - ux, j - uy) * gaussR(sigma_r, abs(value - center));
sum += weight * value;
sum_weight += weight;
}
}
if (sum_weight >= 0.0f)
{
output[idx] = sum / sum_weight;
}
}
__global__
void ComputeNormalMapKernel(float4 *normal, float *depth,
uint width, uint height,
float fx, float fy, float cx, float cy)
{
const int ux = blockIdx.x * blockDim.x + threadIdx.x;
const int uy = blockIdx.y * blockDim.y + threadIdx.y;
const size_t idx = GetArrayIndex(ux, uy, width);
if (ux < 1 or uy < 1 or ux + 1 >= width or uy + 1 >= height)
{
if (ux == 1 or uy == 1 or ux + 1 == width or uy + 1 == height)
normal[idx] = make_float4(0);
return;
}
float3 normal_ = make_float3(0);
float3 center = GeometryHelper::ImageReprojectToCamera(ux, uy, depth[idx], fx, fy, cx, cy);
float3 neighbors[8];
size_t count = 0;
static const int2 coords[8] = {{-1, -1},
{0, -1},
{1, -1},
{1, 0},
{1, 1},
{0, 1},
{-1, 1},
{-1, 0}};
for (int i = 0; i < 8; i++)
{
int u = ux + coords[i].x;
int v = uy + coords[i].y;
float depth_value = depth[GetArrayIndex(u, v, width)];
if (depth_value == 0.0f or depth_value == MINF)
{
normal[idx] = make_float4(0);
return;
}
neighbors[count] = GeometryHelper::ImageReprojectToCamera(u, v, depth_value, fx, fy, cx, cy);
count++;
}
for (int i = 0; i < count; i++)
{
float3 n = normalize(cross(neighbors[i] - center, neighbors[(i + 1) % 4] - center));
if (n.z > 0) // This is an outlier case caused by faulty depth data!
continue;
normal_ += n;
}
normal[idx] = make_float4(normalize(make_float3(normal_.x, normal_.y, normal_.z)), 0.0f);
}
//////////
/// Member function: (CPU calling GPU kernels)
__host__
void ResetInlierRatio(
float *inlier_ratio,
SensorParams ¶ms
)
{
uint width = params.width;
uint height = params.height;
const uint threads_per_block = 16;
const dim3 grid_size((width + threads_per_block - 1) / threads_per_block,
(height + threads_per_block - 1) / threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
ResetInlierRatioKernel << < grid_size, block_size >> > (
inlier_ratio, width, height);
}
__host__
void ConvertDepthFormat(
cv::Mat &depth_img,
short *depth_buffer,
float *depth_data,
SensorParams ¶ms
)
{
/// First copy the CPU depth data into the device-side short buffer
uint width = params.width;
uint height = params.height;
uint image_size = width * height;
checkCudaErrors(cudaMemcpy(depth_buffer, (short *) depth_img.data,
sizeof(short) * image_size,
cudaMemcpyHostToDevice));
const uint threads_per_block = 16;
const dim3 grid_size((width + threads_per_block - 1) / threads_per_block,
(height + threads_per_block - 1) / threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
ConvertDepthFormatKernel << < grid_size, block_size >> > (
depth_data,
depth_buffer,
width, height,
params.range_factor,
params.min_depth_range,
params.max_depth_range);
// float *depth_tmp;
// checkCudaErrors(cudaMalloc(&depth_tmp, sizeof(float) * width * height));
// ConvertDepthFormatKernel << < grid_size, block_size >> > (
// depth_tmp,
// depth_buffer,
// width, height,
// params.range_factor,
// params.min_depth_range,
// params.max_depth_range);
// BilateralFilterKernelFloat << < grid_size, block_size >> > (
// depth_tmp,
// depth_data,
// 5,
// 5,
// width,
// height
// );
// checkCudaErrors(cudaFree(depth_tmp));
}
__host__
void ConvertColorFormat(
cv::Mat &color_img,
uchar4 *color_buffer,
float4 *color_data,
SensorParams ¶ms
)
{
uint width = params.width;
uint height = params.height;
uint image_size = width * height;
checkCudaErrors(cudaMemcpy(color_buffer, color_img.data,
sizeof(uchar4) * image_size,
cudaMemcpyHostToDevice));
const int threads_per_block = 16;
const dim3 grid_size((width + threads_per_block - 1) / threads_per_block,
(height + threads_per_block - 1) / threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
ConvertColorFormatKernel << < grid_size, block_size >> > (
color_data,
color_buffer,
width,
height);
}
__host__
void ComputeNormalMap(
float *depth_data,
float4 *normal_data,
SensorParams ¶ms,
double &normal_estimation_time,
double &bilateral_filter_time
)
{
Timer timer;
uint width = params.width;
uint height = params.height;
const int threads_per_block = 16;
const dim3 grid_size((width + threads_per_block - 1) / threads_per_block,
(height + threads_per_block - 1) / threads_per_block);
const dim3 block_size(threads_per_block, threads_per_block);
// Filter depth image BEFORE normal estimation
// cv::cuda::GpuMat depth_img(height, width, CV_32FC1, depth_data);
// cv::cuda::GpuMat depth_img_filtered;
// cv::cuda::bilateralFilter(depth_img, depth_img_filtered, -1, 5, 5, cv::BORDER_DEFAULT);
timer.Tick();
float4 *normals_tmp;
checkCudaErrors(cudaMalloc(&normals_tmp, sizeof(float4) * width * height));
ComputeNormalMapKernel << < grid_size, block_size >> > (
// normal_data,
normals_tmp,
// reinterpret_cast<float *>(depth_img_filtered.data),
depth_data,
width,
height,
params.fx, params.fy, params.cx, params.cy
);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
normal_estimation_time = timer.Tock();
timer.Tick();
BilateralFilterKernel << < grid_size, block_size >> > (
normals_tmp,
normal_data,
5,
5,
width,
height
);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaGetLastError());
bilateral_filter_time = timer.Tock();
checkCudaErrors(cudaFree(normals_tmp));
// Filter normal data AFTER normal estimation
// cv::cuda::GpuMat normal_map(height, width, CV_32FC4, normal_data);
// cv::cuda::GpuMat normal_map_filtered;
// cv::cuda::bilateralFilter(normal_map, normal_map_filtered, -1, 5, 5, cv::BORDER_DEFAULT);
// checkCudaErrors(cudaMemcpy(normal_data, normal_map_filtered.data,
// sizeof(float4) * height * width,
// cudaMemcpyDeviceToDevice));
NormalizeNormalsKernel << < grid_size, block_size >> > (normal_data, width);
}
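// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): one possible way a caller
// could drive the host wrappers above for a single RGB-D frame. The buffer
// sizes mirror the cudaMemcpy calls in this file; the function name and the
// assumption that SensorParams arrives already filled in are illustrative
// only, not a confirmed API of the surrounding project.
__host__
void PreprocessFrameExample(
    cv::Mat &depth_img,
    cv::Mat &color_img,
    SensorParams &params
)
{
  const uint image_size = params.width * params.height;
  short *depth_buffer;   // raw 16-bit depth as stored in depth_img
  uchar4 *color_buffer;  // raw 8-bit BGRA colour as stored in color_img
  float *depth_data;     // metric depth in metres (MINF where invalid)
  float4 *color_data;    // normalised RGBA in [0, 1]
  float4 *normal_data;   // per-pixel normals estimated from the depth map
  checkCudaErrors(cudaMalloc(&depth_buffer, sizeof(short) * image_size));
  checkCudaErrors(cudaMalloc(&color_buffer, sizeof(uchar4) * image_size));
  checkCudaErrors(cudaMalloc(&depth_data, sizeof(float) * image_size));
  checkCudaErrors(cudaMalloc(&color_data, sizeof(float4) * image_size));
  checkCudaErrors(cudaMalloc(&normal_data, sizeof(float4) * image_size));
  double normal_estimation_time = 0.0, bilateral_filter_time = 0.0;
  ConvertDepthFormat(depth_img, depth_buffer, depth_data, params);
  ConvertColorFormat(color_img, color_buffer, color_data, params);
  ComputeNormalMap(depth_data, normal_data, params,
                   normal_estimation_time, bilateral_filter_time);
  // ... hand depth_data / color_data / normal_data to the reconstruction ...
  checkCudaErrors(cudaFree(depth_buffer));
  checkCudaErrors(cudaFree(color_buffer));
  checkCudaErrors(cudaFree(depth_data));
  checkCudaErrors(cudaFree(color_data));
  checkCudaErrors(cudaFree(normal_data));
}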
|
e42a5e4a8777d08c4139804738d608272833c978.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// findRangeK function
__global__ void
findRangeK( const long height,
const knode *knodesD,
const long knodes_elem,
long *currKnodeD,
long *offsetD,
long *lastKnodeD,
long *offset_2D,
const int *startD,
const int *endD,
int *RecstartD,
int *ReclenD)
{
// private thread IDs
int thid = threadIdx.x;
int bid = blockIdx.x;
for(int i = 0; i < height; i++){
if((knodesD[currKnodeD[bid]].keys[thid] <= startD[bid]) && (knodesD[currKnodeD[bid]].keys[thid+1] > startD[bid])){
// this conditional statement is inserted to avoid a crash due to a bug in the original code:
// the "offset[bid]" calculated below is later used to address a part of knodes outside of its bounds, causing a segmentation fault
// more specifically, the values saved into knodes->indices in the main function are out of bounds of the knodes array they address
if(knodesD[currKnodeD[bid]].indices[thid] < knodes_elem){
offsetD[bid] = knodesD[currKnodeD[bid]].indices[thid];
}
}
if((knodesD[lastKnodeD[bid]].keys[thid] <= endD[bid]) && (knodesD[lastKnodeD[bid]].keys[thid+1] > endD[bid])){
// this conditional statement is inserted to avoid a crash due to a bug in the original code:
// the "offset_2[bid]" calculated below is later used to address a part of knodes outside of its bounds, causing a segmentation fault
// more specifically, the values saved into knodes->indices in the main function are out of bounds of the knodes array they address
if(knodesD[lastKnodeD[bid]].indices[thid] < knodes_elem){
offset_2D[bid] = knodesD[lastKnodeD[bid]].indices[thid];
}
}
__syncthreads();
// set for next tree level
if(thid==0){
currKnodeD[bid] = offsetD[bid];
lastKnodeD[bid] = offset_2D[bid];
}
__syncthreads();
}
// Find the index of the starting record
if(knodesD[currKnodeD[bid]].keys[thid] == startD[bid]){
RecstartD[bid] = knodesD[currKnodeD[bid]].indices[thid];
}
__syncthreads();
// Find the index of the ending record
if(knodesD[lastKnodeD[bid]].keys[thid] == endD[bid]){
ReclenD[bid] = knodesD[lastKnodeD[bid]].indices[thid] - RecstartD[bid]+1;
}
}
|
e42a5e4a8777d08c4139804738d608272833c978.cu
|
// findRangeK function
__global__ void
findRangeK( const long height,
const knode *knodesD,
const long knodes_elem,
long *currKnodeD,
long *offsetD,
long *lastKnodeD,
long *offset_2D,
const int *startD,
const int *endD,
int *RecstartD,
int *ReclenD)
{
// private thread IDs
int thid = threadIdx.x;
int bid = blockIdx.x;
for(int i = 0; i < height; i++){
if((knodesD[currKnodeD[bid]].keys[thid] <= startD[bid]) && (knodesD[currKnodeD[bid]].keys[thid+1] > startD[bid])){
// this conditional statement is inserted to avoid a crash due to a bug in the original code:
// the "offset[bid]" calculated below is later used to address a part of knodes outside of its bounds, causing a segmentation fault
// more specifically, the values saved into knodes->indices in the main function are out of bounds of the knodes array they address
if(knodesD[currKnodeD[bid]].indices[thid] < knodes_elem){
offsetD[bid] = knodesD[currKnodeD[bid]].indices[thid];
}
}
if((knodesD[lastKnodeD[bid]].keys[thid] <= endD[bid]) && (knodesD[lastKnodeD[bid]].keys[thid+1] > endD[bid])){
// this conditional statement is inserted to avoid a crash due to a bug in the original code:
// the "offset_2[bid]" calculated below is later used to address a part of knodes outside of its bounds, causing a segmentation fault
// more specifically, the values saved into knodes->indices in the main function are out of bounds of the knodes array they address
if(knodesD[lastKnodeD[bid]].indices[thid] < knodes_elem){
offset_2D[bid] = knodesD[lastKnodeD[bid]].indices[thid];
}
}
__syncthreads();
// set for next tree level
if(thid==0){
currKnodeD[bid] = offsetD[bid];
lastKnodeD[bid] = offset_2D[bid];
}
__syncthreads();
}
// Find the index of the starting record
if(knodesD[currKnodeD[bid]].keys[thid] == startD[bid]){
RecstartD[bid] = knodesD[currKnodeD[bid]].indices[thid];
}
__syncthreads();
// Find the index of the ending record
if(knodesD[lastKnodeD[bid]].keys[thid] == endD[bid]){
ReclenD[bid] = knodesD[lastKnodeD[bid]].indices[thid] - RecstartD[bid]+1;
}
}
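// ---------------------------------------------------------------------------
// Editor's note (illustrative, not from the original benchmark source): the
// indexing above assumes one thread block per range query (bid) and one
// thread per key slot of a B+-tree node (thid), so a matching launch would
// look roughly like
//
//     findRangeK<<<count, order>>>(height, knodesD, knodes_elem,
//                                  currKnodeD, offsetD, lastKnodeD, offset_2D,
//                                  startD, endD, RecstartD, ReclenD);
//
// where `count` is the number of queries and `order` is the node fan-out;
// both names are placeholders here, not taken from this file.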
|
70dea03227c106338ce0832e89bd99c437598e1d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file calculateForce.cu
* \author Anush Krishnan ([email protected])
* \brief Implementation of the kernels to calculate the forces acting on a body
* using a control-volume approach.
* The method is described in Lai & Peskin (2000).
*/
#include "calculateForce.h"
#define BSZ 16
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
/**
* \brief Calculates drag using a control-volume approach (left-right).
*
* Evaluate the contribution from the left and right parts of the control surface.
*
* \param FxX raw pointer to the vector storing the drag in the x-direction
* \param lambda raw pointer to the vector storing all the pressure and Lagrangian forces
* \param q raw pointer to the vector storing all the fluxes
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left corner cell of the control surface
* \param J y-index of the top-right corner cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragLeftRight(real *FxX, real *q, real *lambda, real nu, real *dx, real *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= ncy)
return;
int Ip = (J+idx)*nx + I,
Iu = (J+idx)*(nx-1) + (I-1);
FxX[idx] = -(
// multiply the pressure with the surface area to get p dy
(lambda[Ip+ncx]-lambda[Ip-1])*dy[J+idx]
+
// divide q^2 by dy, so that just u^2 dy is obtained
(
0.25*(q[Iu+ncx+1] + q[Iu+ncx])*(q[Iu+ncx+1] + q[Iu+ncx])
- 0.25*(q[Iu] + q[Iu-1])*(q[Iu] + q[Iu-1])
)/dy[J+idx]
-
// no multiplication or division since du/dx dy = dq/dx
nu*
(
(q[Iu+ncx+1] - q[Iu+ncx])/dx[I+ncx]
- (q[Iu] - q[Iu-1])/dx[I-1]
)
);
}
/**
* \brief Calculate drag using a control-volume approach (bottom-top).
*
* Evaluate the contribution from the bottom and top parts of the control surface.
*
* \param FxY raw pointer to the vector storing the drag in the y-direction
* \param q raw pointer to the vector storing all the fluxes
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left corner cell of the control surface
* \param J y-index of the top-right corner cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragBottomTop(real *FxY, real *q, real nu, real *dx, real *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx > ncx)
return;
int Iu = J*(nx-1) + (I-1+idx),
Iv = (nx-1)*ny + (J-1)*nx + I+idx;
FxY[idx] = -(
// multiply by dS
(
0.25 * ( q[Iu+ncy*(nx-1)]/dy[J+ncy] + q[Iu+(ncy-1)*(nx-1)]/dy[J+ncy-1] )
* ( q[Iv+ncy*nx]/dx[I+idx] + q[Iv+ncy*nx-1]/dx[I+idx-1] )
-
0.25 * ( q[Iu]/dy[J] + q[Iu-(nx-1)]/dy[J-1] )
* ( q[Iv]/dx[I+idx] + q[Iv-1]/dx[I+idx-1] )
)
-
// multiply by dS (cannot use the leftRight trick in this case)
nu*
(
(
(q[Iu+ncy*(nx-1)]/dy[J+ncy] - q[Iu+(ncy-1)*(nx-1)]/dy[J+ncy-1])/2.0/(dy[J+ncy]+dy[J+ncy-1]) +
(q[Iv+ncy*nx]/dx[I+idx] - q[Iv+ncy*nx-1]/dx[I+idx-1])/2.0/(dx[I+idx]+dx[I+idx-1])
)
-
(
(q[Iu]/dy[J] - q[Iu-(nx-1)]/dy[J-1])/2.0/(dy[J]+dy[J-1]) +
(q[Iv]/dx[I+idx] - q[Iv-1]/dx[I+idx-1])/2.0/(dx[I+idx]+dx[I+idx-1])
)
)
)*0.5*(dx[I+idx]+dx[I+idx-1]);
}
/**
* \brief Calculate drag using a control-volume approach (unsteady).
*
* Evaluate the unsteady contribution of the control volume.
*
* \param FxU raw pointer to the vector storing the unsteady drag components
* \param q raw pointer to the vector storing all the fluxes
* \param qOld raw pointer to the vector storing all the fluxes at the previous time-step
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param dt time increment
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left cell of the control surface
* \param J y-index of the top-right cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragUnsteady(real *FxU, real *q, real *qOld, real *dx, real *dy, real dt,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= (ncx+1)*ncy)
return;
int i = idx%(ncx+1),
j = idx/(ncx+1);
int Iu = (J+j)*(nx-1) + (I-1+i);
FxU[idx] = - (q[Iu] - qOld[Iu])/dt * 0.5*(dx[I+i]+dx[I-1+i]);
}
/**
* \brief Calculate lift using a control-volume approach (left-right).
*
* Evaluate the contribution from the left and right parts of the control surface.
*
* \param FyX raw pointer to the vector storing the lift components in the x-direction
* \param q raw pointer to the vector storing all the fluxes
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left cell of the control surface
* \param J y-index of the top-right cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftLeftRight(real *FyX, real *q, real nu, real *dx, real *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx > ncy)
return;
int Iu = (J+idx)*(nx-1) + (I-1),
Iv = (nx-1)*ny + (J-1+idx)*nx + I;
FyX[idx] = -(
// multiply by dS
(
0.25 * ( q[Iu+ncx]/dy[J+idx] + q[Iu+ncx-(nx-1)]/dy[J-1+idx] )
* ( q[Iv+ncx]/dx[I+ncx] + q[Iv+ncx-1]/dx[I+ncx-1] )
-
0.25 * ( q[Iu]/dy[J+idx] + q[Iu-(nx-1)]/dy[J-1+idx] )
* ( q[Iv]/dx[I] + q[Iv-1]/dx[I-1] )
)
-
// multiply by dS (cannot use the leftRight trick in this case)
nu*
(
(
(q[Iu+ncx]/dy[J+idx] - q[Iu+ncx-(nx-1)]/dy[J-1+idx])/2.0/(dy[J+idx]+dy[J-1+idx]) +
(q[Iv+ncx]/dx[I+ncx] - q[Iv+ncx-1]/dx[I+ncx-1])/2.0/(dx[I+ncx]+dx[I+ncx-1])
)
-
(
(q[Iu]/dy[J+idx] - q[Iu-(nx-1)]/dy[J-1+idx])/2.0/(dy[J+idx]+dy[J-1+idx]) +
(q[Iv]/dx[I] - q[Iv-1]/dx[I-1])/2.0/(dx[I]+dx[I-1])
)
)
)*0.5*(dy[J+idx]+dy[J-1+idx]);
}
/**
* \brief Calculate lift using a control-volume approach (bottom-top).
*
* Evaluate the contribution from the bottom and top parts of the control surface.
*
* \param FyY raw pointer to the vector storing the lift components in the y-direction
* \param q raw pointer to the vector storing all the fluxes
* \param lambda raw pointer to the vector storing the pressure and Lagrangian forces
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left cell of the control surface
* \param J y-index of the top-right cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftBottomTop(real *FyY, real *q, real *lambda, real nu, real *dx, real *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= ncx)
return;
int Ip = J*nx + I+idx,
Iv = (nx-1)*ny + (J-1)*nx + I+idx;
FyY[idx] = -(
// multiply the pressure with the surface area to get p dx
(lambda[Ip+ncy*nx]-lambda[Ip-nx])*dx[I+idx]
+
// divide q^2 by dx, so that just v^2 dx is obtained
(
0.25*(q[Iv+(ncy+1)*nx] + q[Iv+ncy*nx])*(q[Iv+(ncy+1)*nx] + q[Iv+ncy*nx])
- 0.25*(q[Iv] + q[Iv-nx])*(q[Iv] + q[Iv-nx])
)/dx[I+idx]
-
// no multiplication or division since dv/dy dx = dq/dy
nu*
(
(q[Iv+(ncy+1)*nx] - q[Iv+ncy*nx])/dy[J+ncy]
- (q[Iv] - q[Iv-nx])/dy[J-1]
)
);
}
/**
* \brief Calculate lift using a control-volume approach (unsteady).
*
* Evaluate the unsteady contribution of the control volume.
*
* \param FyU raw pointer to the vector storing the unsteady lift components
* \param q raw pointer to the vector storing all the fluxes
* \param qOld raw pointer to the vector storing all the fluxes at the previous time-step
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param dt time increment
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left cell of the control surface
* \param J y-index of the top-right cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftUnsteady(real *FyU, real *q, real *qOld, real *dx, real *dy, real dt,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if( idx >= ncx*(ncy+1) )
return;
int i = idx%ncx,
j = idx/ncx;
int Iv = (J-1+j)*nx + (I+i) + (nx-1)*ny;
FyU[idx] = - (q[Iv] - qOld[Iv])/dt * 0.5*(dy[J+j]+dy[J-1+j]);
}
/**
* \brief To be documented
*/
__global__
void forceX(real *f, real *q, real *rn, int *tags,
int nx, int ny, real *dx, real *dy,
real dt, real alpha, real nu)
{
int bx = blockIdx.x,
by = blockIdx.y,
i = threadIdx.x,
j = threadIdx.y;
// work out global index of first point in block
int I = (BSZ-2)*bx + i,
J = (BSZ-2)*by + j;
if (I >= nx-1 || J >= ny) {
return;
}
int Gidx_x = J*(nx-1) + I;
real dTerm;
__shared__ real u[BSZ][BSZ];
__shared__ real Dx[BSZ][BSZ], Dy[BSZ][BSZ];
Dy[j][i] = dy[J];
Dx[j][i] = dx[I];
/// transfer from global to shared memory
u[j][i] = q[Gidx_x]/Dy[j][i];
__syncthreads();
/// check bounds for convective term in the x-direction
int global_check = ( I==0 || I==(nx-2) || J==0 || J==(ny-1) ), ///< check if we compute globally
block_check = ( i==0 || i==(BSZ-1) || j==0 || j==(BSZ-1) ); ///< check if element within block computes
/// X-component
if( !(global_check || block_check) )
{
dTerm = alpha*nu*2.0*( \
( Dx[j][i]*u[j][i+1] - (Dx[j][i]+Dx[j][i+1])*u[j][i] + Dx[j][i+1]*u[j][i-1] ) / ( Dx[j][i]*Dx[j][i+1]*(Dx[j][i]+Dx[j][i+1]) ) \
+ 4.0*( (Dy[j][i]+Dy[j-1][i])*u[j+1][i] - (Dy[j-1][i] + 2.0*Dy[j][i] + Dy[j+1][i])*u[j][i] + (Dy[j][i]+Dy[j+1][i])*u[j-1][i] ) \
/( (Dy[j][i]+Dy[j-1][i]) * (Dy[j-1][i] + 2.0*Dy[j][i] + Dy[j+1][i]) * (Dy[j][i]+Dy[j+1][i]) ) \
);
f[Gidx_x] = ( u[j][i]/dt - dTerm - rn[Gidx_x]/(0.5*(Dx[j][i]+Dx[j][i+1])) ) * (!(tags[Gidx_x]==-1));
}
}
/**
* \brief Doing nothing.
*/
__global__
void forceY(){}
} // end of namespace kernels
|
70dea03227c106338ce0832e89bd99c437598e1d.cu
|
/***************************************************************************//**
* \file calculateForce.cu
* \author Anush Krishnan ([email protected])
* \brief Implementation of the kernels to calculate the forces acting on a body
* using a control-volume approach.
* The method is described in Lai & Peskin (2000).
*/
#include "calculateForce.h"
#define BSZ 16
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
/**
* \brief Calculates drag using a control-volume approach (left-right).
*
* Evaluate the contribution from the left and right parts of the control surface.
*
* \param FxX raw pointer to the vector storing the drag in the x-direction
* \param lambda raw pointer to the vector storing all the pressure and Lagrangian forces
* \param q raw pointer to the vector storing all the fluxes
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left corner cell of the control surface
* \param J y-index of the top-right corner cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragLeftRight(real *FxX, real *q, real *lambda, real nu, real *dx, real *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= ncy)
return;
int Ip = (J+idx)*nx + I,
Iu = (J+idx)*(nx-1) + (I-1);
FxX[idx] = -(
// multiply the pressure with the surface area to get p dy
(lambda[Ip+ncx]-lambda[Ip-1])*dy[J+idx]
+
// divide q^2 by dy, so that just u^2 dy is obtained
(
0.25*(q[Iu+ncx+1] + q[Iu+ncx])*(q[Iu+ncx+1] + q[Iu+ncx])
- 0.25*(q[Iu] + q[Iu-1])*(q[Iu] + q[Iu-1])
)/dy[J+idx]
-
// no multiplication or division since du/dx dy = dq/dx
nu*
(
(q[Iu+ncx+1] - q[Iu+ncx])/dx[I+ncx]
- (q[Iu] - q[Iu-1])/dx[I-1]
)
);
}
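/**
 * Editor's note (illustrative, not from the original source): per row idx of
 * the control surface, the expression above is the discrete form of
 *
 *   FxX = -[ (p_E - p_W) dy + (u_E^2 - u_W^2) dy - nu (du/dx|_E - du/dx|_W) dy ],
 *
 * where E/W denote the right/left faces of the control volume. Because the
 * flux q equals u*dy, the code obtains u^2*dy as q^2/dy and du/dx*dy as dq/dx,
 * so only the pressure term carries an explicit factor of dy.
 */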
/**
* \brief Calculate drag using a control-volume approach (bottom-top).
*
* Evaluate the contribution from the bottom and top parts of the control surface.
*
* \param FxY raw pointer to the vector storing the drag in the y-direction
* \param q raw pointer to the vector storing all the fluxes
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left corner cell of the control surface
* \param J y-index of the top-right corner cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragBottomTop(real *FxY, real *q, real nu, real *dx, real *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx > ncx)
return;
int Iu = J*(nx-1) + (I-1+idx),
Iv = (nx-1)*ny + (J-1)*nx + I+idx;
FxY[idx] = -(
// multiply by dS
(
0.25 * ( q[Iu+ncy*(nx-1)]/dy[J+ncy] + q[Iu+(ncy-1)*(nx-1)]/dy[J+ncy-1] )
* ( q[Iv+ncy*nx]/dx[I+idx] + q[Iv+ncy*nx-1]/dx[I+idx-1] )
-
0.25 * ( q[Iu]/dy[J] + q[Iu-(nx-1)]/dy[J-1] )
* ( q[Iv]/dx[I+idx] + q[Iv-1]/dx[I+idx-1] )
)
-
// multiply by dS (cannot use the leftRight trick in this case)
nu*
(
(
(q[Iu+ncy*(nx-1)]/dy[J+ncy] - q[Iu+(ncy-1)*(nx-1)]/dy[J+ncy-1])/2.0/(dy[J+ncy]+dy[J+ncy-1]) +
(q[Iv+ncy*nx]/dx[I+idx] - q[Iv+ncy*nx-1]/dx[I+idx-1])/2.0/(dx[I+idx]+dx[I+idx-1])
)
-
(
(q[Iu]/dy[J] - q[Iu-(nx-1)]/dy[J-1])/2.0/(dy[J]+dy[J-1]) +
(q[Iv]/dx[I+idx] - q[Iv-1]/dx[I+idx-1])/2.0/(dx[I+idx]+dx[I+idx-1])
)
)
)*0.5*(dx[I+idx]+dx[I+idx-1]);
}
/**
* \brief Calculate drag using a control-volume approach (unsteady).
*
* Evaluate the unsteady contribution of the control volume.
*
* \param FxU raw pointer to the vector storing the unsteady drag components
* \param q raw pointer to the vector storing all the fluxes
* \param qOld raw pointer to the vector storing all the fluxes at the previous time-step
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param dt time increment
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left cell of the control surface
* \param J y-index of the top-right cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragUnsteady(real *FxU, real *q, real *qOld, real *dx, real *dy, real dt,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= (ncx+1)*ncy)
return;
int i = idx%(ncx+1),
j = idx/(ncx+1);
int Iu = (J+j)*(nx-1) + (I-1+i);
FxU[idx] = - (q[Iu] - qOld[Iu])/dt * 0.5*(dx[I+i]+dx[I-1+i]);
}
/**
* \brief Calculate lift using a control-volume approach (left-right).
*
* Evaluate the contribution from the left and right parts of the control surface.
*
* \param FyX raw pointer to the vector storing the lift components in the x-direction
* \param q raw pointer to the vector storing all the fluxes
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left cell of the control surface
* \param J y-index of the top-right cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftLeftRight(real *FyX, real *q, real nu, real *dx, real *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx > ncy)
return;
int Iu = (J+idx)*(nx-1) + (I-1),
Iv = (nx-1)*ny + (J-1+idx)*nx + I;
FyX[idx] = -(
// multiply by dS
(
0.25 * ( q[Iu+ncx]/dy[J+idx] + q[Iu+ncx-(nx-1)]/dy[J-1+idx] )
* ( q[Iv+ncx]/dx[I+ncx] + q[Iv+ncx-1]/dx[I+ncx-1] )
-
0.25 * ( q[Iu]/dy[J+idx] + q[Iu-(nx-1)]/dy[J-1+idx] )
* ( q[Iv]/dx[I] + q[Iv-1]/dx[I-1] )
)
-
// multiply by dS (cannot use the leftRight trick in this case)
nu*
(
(
(q[Iu+ncx]/dy[J+idx] - q[Iu+ncx-(nx-1)]/dy[J-1+idx])/2.0/(dy[J+idx]+dy[J-1+idx]) +
(q[Iv+ncx]/dx[I+ncx] - q[Iv+ncx-1]/dx[I+ncx-1])/2.0/(dx[I+ncx]+dx[I+ncx-1])
)
-
(
(q[Iu]/dy[J+idx] - q[Iu-(nx-1)]/dy[J-1+idx])/2.0/(dy[J+idx]+dy[J-1+idx]) +
(q[Iv]/dx[I] - q[Iv-1]/dx[I-1])/2.0/(dx[I]+dx[I-1])
)
)
)*0.5*(dy[J+idx]+dy[J-1+idx]);
}
/**
* \brief Calculate lift using a control-volume approach (bottom-top).
*
* Evaluate the contribution from the bottom and top parts of the control surface.
*
* \param FyY raw pointer to the vector storing the lift components in the y-direction
* \param q raw pointer to the vector storing all the fluxes
* \param lambda raw pointer to the vector storing the pressure and Lagrangian forces
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direcyion
* \param I x-index of the bottom-left cell of the control surface
* \param J y-index of the top-right cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftBottomTop(real *FyY, real *q, real *lambda, real nu, real *dx, real *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= ncx)
return;
int Ip = J*nx + I+idx,
Iv = (nx-1)*ny + (J-1)*nx + I+idx;
FyY[idx] = -(
// multiply the pressure with the surface area to get p dx
(lambda[Ip+ncy*nx]-lambda[Ip-nx])*dx[I+idx]
+
// divide q^2 by dx, so that just v^2 dx is obtained
(
0.25*(q[Iv+(ncy+1)*nx] + q[Iv+ncy*nx])*(q[Iv+(ncy+1)*nx] + q[Iv+ncy*nx])
- 0.25*(q[Iv] + q[Iv-nx])*(q[Iv] + q[Iv-nx])
)/dx[I+idx]
-
// no multiplication or division since dv/dy dx = dq/dy
nu*
(
(q[Iv+(ncy+1)*nx] - q[Iv+ncy*nx])/dy[J+ncy]
- (q[Iv] - q[Iv-nx])/dy[J-1]
)
);
}
/**
* \brief Calculate lift using a control-volume approach (unsteady).
*
* Evaluate the unsteady contribution of the control volume.
*
* \param FyU raw pointer to the vector storing the unsteady lift components
* \param q raw pointer to the vector storing all the fluxes
* \param qOld raw pointer to the vector storing all the fluxes at the previous time-step
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param dt time increment
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left cell of the control surface
* \param J y-index of the top-right cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftUnsteady(real *FyU, real *q, real *qOld, real *dx, real *dy, real dt,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if( idx >= ncx*(ncy+1) )
return;
int i = idx%ncx,
j = idx/ncx;
int Iv = (J-1+j)*nx + (I+i) + (nx-1)*ny;
FyU[idx] = - (q[Iv] - qOld[Iv])/dt * 0.5*(dy[J+j]+dy[J-1+j]);
}
/**
* \brief To be documented
*/
__global__
void forceX(real *f, real *q, real *rn, int *tags,
int nx, int ny, real *dx, real *dy,
real dt, real alpha, real nu)
{
int bx = blockIdx.x,
by = blockIdx.y,
i = threadIdx.x,
j = threadIdx.y;
// work out global index of first point in block
int I = (BSZ-2)*bx + i,
J = (BSZ-2)*by + j;
if (I >= nx-1 || J >= ny) {
return;
}
int Gidx_x = J*(nx-1) + I;
real dTerm;
__shared__ real u[BSZ][BSZ];
__shared__ real Dx[BSZ][BSZ], Dy[BSZ][BSZ];
Dy[j][i] = dy[J];
Dx[j][i] = dx[I];
/// transfer from global to shared memory
u[j][i] = q[Gidx_x]/Dy[j][i];
__syncthreads();
/// check bounds for convective term in the x-direction
int global_check = ( I==0 || I==(nx-2) || J==0 || J==(ny-1) ), ///< check if we compute globally
block_check = ( i==0 || i==(BSZ-1) || j==0 || j==(BSZ-1) ); ///< check if element within block computes
/// X-component
if( !(global_check || block_check) )
{
dTerm = alpha*nu*2.0*( \
( Dx[j][i]*u[j][i+1] - (Dx[j][i]+Dx[j][i+1])*u[j][i] + Dx[j][i+1]*u[j][i-1] ) / ( Dx[j][i]*Dx[j][i+1]*(Dx[j][i]+Dx[j][i+1]) ) \
+ 4.0*( (Dy[j][i]+Dy[j-1][i])*u[j+1][i] - (Dy[j-1][i] + 2.0*Dy[j][i] + Dy[j+1][i])*u[j][i] + (Dy[j][i]+Dy[j+1][i])*u[j-1][i] ) \
/( (Dy[j][i]+Dy[j-1][i]) * (Dy[j-1][i] + 2.0*Dy[j][i] + Dy[j+1][i]) * (Dy[j][i]+Dy[j+1][i]) ) \
);
f[Gidx_x] = ( u[j][i]/dt - dTerm - rn[Gidx_x]/(0.5*(Dx[j][i]+Dx[j][i+1])) ) * (!(tags[Gidx_x]==-1));
}
}
/**
* \brief Doing nothing.
*/
__global__
void forceY(){}
} // end of namespace kernels
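// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): one way the x-direction
// control-volume kernels above could be launched from the host. The device
// arrays and the (I, J, ncx, ncy) control-volume indices are assumed to be
// prepared exactly as the kernels expect, and the block size of 256 is an
// arbitrary choice for illustration, not a value taken from this file.
void computeDragExample(real *FxX, real *FxY, real *FxU,
                        real *q, real *qOld, real *lambda,
                        real nu, real dt, real *dx, real *dy,
                        int nx, int ny, int I, int J, int ncx, int ncy)
{
	const int blockSize = 256;
	// one thread per row of the left/right parts of the control surface
	kernels::dragLeftRight<<<(ncy + blockSize - 1) / blockSize, blockSize>>>(
	    FxX, q, lambda, nu, dx, dy, nx, ny, I, J, ncx, ncy);
	// one thread per column of the bottom/top parts (ncx + 1 of them)
	kernels::dragBottomTop<<<(ncx + blockSize) / blockSize, blockSize>>>(
	    FxY, q, nu, dx, dy, nx, ny, I, J, ncx, ncy);
	// one thread per u-flux inside the control volume (unsteady term)
	kernels::dragUnsteady<<<((ncx + 1) * ncy + blockSize - 1) / blockSize, blockSize>>>(
	    FxU, q, qOld, dx, dy, dt, nx, ny, I, J, ncx, ncy);
	// the partial sums in FxX, FxY and FxU are then reduced (e.g. with thrust)
	// and added together to obtain the total drag on the body.
}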
|
02942e95e197a272c069f5482eb7aaa53ba6812c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
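// Editor's note, a worked example of the output-size formula above (values
// chosen purely for illustration): with height = width = 8, a 3x3 kernel,
// pad_h = pad_w = 1, stride 1 and dilation 1,
//   height_col = (8 + 2*1 - (1*(3 - 1) + 1)) / 1 + 1 = (8 + 2 - 3) + 1 = 8,
// so the spatial size is preserved and num_kernels = channels * 8 * 8, i.e.
// one thread per (channel, output row, output column) triple.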
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
const int num_kernels, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 1>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 2>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 3>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 4>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 5>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 6>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 7>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 8>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 9>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 10>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 1>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 2>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 3>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 4>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 5>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 6>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 7>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 8>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 9>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 10>),
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        im_size, data_col, im_shape, col_shape,
        kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
|
02942e95e197a272c069f5482eb7aaa53ba6812c.cu
|
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
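// [Editorial sketch, not part of the original caffe source] A host-side
// helper spelling out the output-size arithmetic used by im2col_gpu and
// col2im_gpu in this file; the name is ours and nothing here calls it.
inline int conv_output_size_sketch(const int size, const int kernel,
                                   const int pad, const int stride,
                                   const int dilation) {
  // Effective kernel extent once dilation is applied, then the usual
  // convolution output formula. E.g. size = 5, kernel = 3, pad = 1,
  // stride = 1, dilation = 1 gives (5 + 2 - 3) / 1 + 1 = 5.
  const int kernel_extent = dilation * (kernel - 1) + 1;
  return (size + 2 * pad - kernel_extent) / stride + 1;
}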
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
const int num_kernels, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
im2col_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
im2col_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
im2col_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
im2col_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
im2col_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
im2col_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
im2col_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
im2col_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
im2col_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
im2col_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
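// [Editorial worked example, not part of the original caffe source] For the
// start/end computation in col2im_gpu_kernel above, take stride_w = 2,
// dilation_w = 1, kernel_w = 3 (so kernel_extent_w = 3) and a padded column
// w_im = 7: w_col_start = (7 - 3) / 2 + 1 = 3 and
// w_col_end = min(7 / 2 + 1, width_col) = min(4, width_col), so the loop
// visits only w_col = 3, the single output column whose kernel window
// {6, 7, 8} covers w_im = 7.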
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
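// [Editorial sketch, not part of the original caffe source] The comment in
// col2im_gpu above notes that parallelizing over image (bottom) elements
// avoids atomics. For contrast, a column-parallel variant would need atomic
// accumulation, roughly like the kernel below; col_to_im_index is a
// hypothetical precomputed column-element -> image-pixel map, and nothing
// in this file instantiates or launches this kernel.
template <typename Dtype>
__global__ void col2im_gpu_atomic_sketch(const int n, const Dtype* data_col,
    const int* col_to_im_index, Dtype* data_im) {
  CUDA_KERNEL_LOOP(index, n) {
    // Several column entries can target the same pixel, hence atomicAdd.
    atomicAdd(&data_im[col_to_im_index[index]], data_col[index]);
  }
}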
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
col2im_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
col2im_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
col2im_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
col2im_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
col2im_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
col2im_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
col2im_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
col2im_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
col2im_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
col2im_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
|
60ddc7cbf37751c839553ddb0a47be07c93cf031.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/fast_divmod.h"
namespace paddle {
namespace operators {
namespace math {
struct FastDivModForPooling {
public:
platform::FastDivMod channel;
platform::FastDivMod width;
platform::FastDivMod height;
explicit HOSTDEVICE FastDivModForPooling(const int channels,
const int output_width,
const int output_height) {
channel = platform::FastDivMod(channels);
width = platform::FastDivMod(output_width);
height = platform::FastDivMod(output_height);
}
};
struct FastDivModForPoolingWithMoreStaff {
public:
platform::FastDivMod channel;
platform::FastDivMod width;
platform::FastDivMod height;
platform::FastDivMod ksize_w;
platform::FastDivMod ksize_h;
platform::FastDivMod stride_w;
platform::FastDivMod stride_h;
explicit HOSTDEVICE FastDivModForPoolingWithMoreStaff(
const int channels, const int input_width, const int input_height,
const int ksize_width, const int ksize_height, const int stride_width,
const int stride_height) {
channel = platform::FastDivMod(channels);
width = platform::FastDivMod(input_width);
height = platform::FastDivMod(input_height);
ksize_w = platform::FastDivMod(ksize_width);
ksize_h = platform::FastDivMod(ksize_height);
stride_w = platform::FastDivMod(stride_width);
stride_h = platform::FastDivMod(stride_height);
}
};
template <typename FastDivModForPooling>
__device__ void OffsetPreparationFor4Dimension(
int index, bool channel_last, FastDivModForPooling divmods,
const int pad_width, const int pad_height, const int aux_width,
const int aux_height, int* w_offset, int* h_offset, int* c_offset,
int* stride) {
if (!channel_last) { /* NCHW */
auto input_width_divmod = divmods.width.Divmod(index);
auto input_height_divmod = divmods.height.Divmod(input_width_divmod.val[0]);
auto channel_divmod = divmods.channel.Divmod(input_height_divmod.val[0]);
*w_offset = input_width_divmod.val[1] + pad_width;
*h_offset = input_height_divmod.val[1] + pad_height;
*c_offset = channel_divmod.val[1];
*stride = (channel_divmod.val[0] * divmods.channel.divisor + *c_offset) *
aux_height * aux_width;
} else { /* NHWC */
auto c_divmod = divmods.channel.Divmod(index);
auto input_width_divmod = divmods.width.Divmod(c_divmod.val[0]);
auto input_height_divmod = divmods.height.Divmod(input_width_divmod.val[0]);
*c_offset = c_divmod.val[1];
*w_offset = input_width_divmod.val[1] + pad_width;
*h_offset = input_height_divmod.val[1] + pad_height;
*stride = input_height_divmod.val[0] * aux_height * aux_width *
divmods.channel.divisor;
}
}
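// [Editorial sketch, not part of the original paddle source] Plain div/mod
// version of the NCHW branch of OffsetPreparationFor4Dimension above,
// showing what the FastDivMod objects compute; added for illustration only,
// nothing in this file calls it.
HOSTDEVICE inline void OffsetPreparationNCHWPlainSketch(
    int index, const int channels, const int width, const int height,
    const int pad_width, const int pad_height, const int aux_width,
    const int aux_height, int* w_offset, int* h_offset, int* c_offset,
    int* stride) {
  *w_offset = index % width + pad_width;
  *h_offset = (index / width) % height + pad_height;
  *c_offset = (index / width / height) % channels;
  const int n = index / width / height / channels;
  // Same layout as the fast-divmod path: offset of this (n, c) plane in an
  // aux_height x aux_width tensor.
  *stride = (n * channels + *c_offset) * aux_height * aux_width;
}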
template <typename PoolProcess, typename T>
__global__ void KernelPool2D(
const int nthreads, const T* input_data, const int channels,
const int input_height, const int input_width, const int output_height,
const int output_width, const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width, const int padding_height,
const int padding_width, FastDivModForPooling divmods,
PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(
index, channel_last, divmods, 0, 0, input_width, input_height,
&w_offset, &h_offset, &c_offset, &input_offset);
input_data += input_offset;
if (adaptive) {
hstart = AdaptStartIndex(h_offset, input_height, output_height);
hend = AdaptEndIndex(h_offset, input_height, output_height);
wstart = AdaptStartIndex(w_offset, input_width, output_width);
wend = AdaptEndIndex(w_offset, input_width, output_width);
} else {
hstart = h_offset * stride_height - padding_height;
hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
wstart = w_offset * stride_width - padding_width;
wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
}
T ele = pool_process.initial();
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
auto input_idx = channel_last
? (h * input_width + w) * channels + c_offset
: h * input_width + w;
pool_process.compute(input_data[input_idx], &ele);
}
}
int pool_size = (exclusive || adaptive) ? (hend - hstart) * (wend - wstart)
: ksize_height * ksize_width;
pool_process.finalize(static_cast<T>(pool_size), &ele);
output_data[index] = ele;
}
}
template <typename T, typename PoolProcess>
__global__ void KernelPool2DGrad(
const int nthreads, const T* __restrict__ input_data,
    const T* __restrict__ output_data, const T* __restrict__ output_grad,
const int output_width, const int output_height, const int input_width,
const int input_height, const int ksize_width, const int ksize_height,
const int stride_width, const int stride_height, const int padding_width,
const int padding_height, FastDivModForPoolingWithMoreStaff divmods,
PoolProcess pool_process, bool exclusive, bool adaptive,
T* __restrict__ input_grad, bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
T input = static_cast<T>(0);
T input_grad_data = static_cast<T>(0);
int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<>(index, channel_last, divmods,
padding_width, padding_height,
output_width, output_height, &w_offset,
&h_offset, &c_offset, &output_offset);
if (pool_process.use_x) {
input = input_data[index];
output_data += output_offset;
}
output_grad += output_offset;
if (adaptive) {
auto tmp_phend = divmods.height.Divmod((h_offset + 1) * output_height);
auto tmp_pwend = divmods.width.Divmod((w_offset + 1) * output_width);
phstart = divmods.height.Div(h_offset * output_height);
pwstart = divmods.width.Div(w_offset * output_width);
phend = tmp_phend.val[1] > 0 ? tmp_phend.val[0] + 1 : tmp_phend.val[0];
pwend = tmp_pwend.val[1] > 0 ? tmp_pwend.val[0] + 1 : tmp_pwend.val[0];
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
auto ksize_w_divmod = divmods.ksize_w.Divmod(input_width);
auto ksize_h_divmod = divmods.ksize_h.Divmod(input_height);
auto tmp_width = ksize_w_divmod.val[1] > 0 ? ksize_w_divmod.val[0] + 1
: ksize_w_divmod.val[0];
auto tmp_height = ksize_h_divmod.val[1] > 0
? ksize_h_divmod.val[0] + 1
: ksize_h_divmod.val[0];
int pool_size = tmp_height * tmp_width;
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
          T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                              : static_cast<T>(0);
          pool_process.compute(input, output_value, output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
} else {
auto stride_height_div = divmods.stride_h.Div(h_offset - ksize_height);
auto stride_width_div = divmods.stride_w.Div(w_offset - ksize_width);
phstart = (h_offset < ksize_height) ? 0 : stride_height_div + 1;
pwstart = (w_offset < ksize_width) ? 0 : stride_width_div + 1;
phend = min(divmods.stride_h.Div(h_offset) + 1, output_height);
pwend = min(divmods.stride_w.Div(w_offset) + 1, output_width);
if (exclusive) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (hend - hstart) * (wend - wstart);
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
            T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                                : static_cast<T>(0);
            pool_process.compute(
                input, output_value, output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size), &input_grad_data);
}
}
} else {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int pool_size = ksize_height * ksize_width;
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
            T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                                : static_cast<T>(0);
            pool_process.compute(
                input, output_value, output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size), &input_grad_data);
}
}
}
}
input_grad[index] = input_grad_data;
}
}
template <typename T>
__global__ void KernelMaxPool2DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, const int channels, const int input_height,
const int input_width, const int output_height, const int output_width,
const int ksize_height, const int ksize_width, const int stride_height,
const int stride_width, const int padding_height, const int padding_width,
T* input_grad, FastDivModForPooling divmods, bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(
index, channel_last, divmods, 0, 0, input_width, input_height,
&w_offset, &h_offset, &c_offset, &input_offset);
input_data += input_offset;
input_grad += input_offset;
int hstart = h_offset * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = w_offset * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
T ele = output_data[index];
int maxIndex = -1;
bool stop = false;
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
int input_data_idx = channel_last
? (h * input_width + w) * channels + c_offset
: h * input_width + w;
if (ele == input_data[input_data_idx]) {
maxIndex = input_data_idx;
stop = true;
}
}
}
if (maxIndex != -1) {
// atomic add
platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]);
}
}
}
template <typename PoolProcess, typename T>
void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input, const std::vector<int>& input_shape,
const std::vector<int>& output_shape, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& paddings,
bool exclusive, bool adaptive, T* output, gpuStream_t stream,
PoolProcess pool_compute) {
const int batch_size = input_shape[0];
const int input_channels = input_shape[1];
const int input_height = input_shape[2];
const int input_width = input_shape[3];
const int output_channels = output_shape[1];
const int output_height = output_shape[2];
const int output_width = output_shape[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
// platform::ChangeThreadNum(context, &thread_num);
thread_num = 512;
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, stream,
nthreads, input, input_channels, input_height, input_width, output_height,
output_width, ksize_height, ksize_width, stride_height, stride_width,
padding_height, padding_width, pool_divmods, pool_compute, exclusive,
adaptive, output);
}
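// [Editorial sketch, not part of the original paddle source] Rough shape of
// how the direct functor above would be driven from the host; the device
// buffer names and the stream are placeholders, and allocation / error
// handling are omitted.
//
//   std::vector<int> in_shape{1, 3, 8, 8};    // NCHW
//   std::vector<int> out_shape{1, 3, 4, 4};   // NCHW, (8 - 2) / 2 + 1 = 4
//   std::vector<int> ksize{2, 2}, strides{2, 2}, paddings{0, 0};
//   Pool2dDirectCUDAFunctor<MaxPool<float>, float> pool2d;
//   pool2d(d_input, in_shape, out_shape, ksize, strides, paddings,
//          /*exclusive=*/true, /*adaptive=*/false, d_output, stream,
//          MaxPool<float>());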
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive,
bool adaptive, framework::Tensor* output,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, input_channels, input_height, input_width,
output_height, output_width, ksize_height, ksize_width, stride_height,
stride_width, padding_height, padding_width, pool_divmods, pool_process,
exclusive, adaptive, output_data);
}
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive,
framework::Tensor* output, PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output->dims()[3] : output->dims()[1];
const int output_height =
channel_last ? output->dims()[1] : output->dims()[2];
const int output_width =
channel_last ? output->dims()[2] : output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelPool2D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, input_channels, input_height, input_width,
output_height, output_width, ksize_height, ksize_width, stride_height,
stride_width, padding_height, padding_width, pool_divmods, pool_process,
exclusive, adaptive, output_data, channel_last);
}
};
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive,
bool adaptive, framework::Tensor* input_grad,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff(
input_channels, input_width, input_height, ksize_width, ksize_height,
stride_width, stride_height);
auto config = GetGpuLaunchConfig1D(context, nthreads);
hipLaunchKernelGGL(( KernelPool2DGrad<T, PoolProcess>),
dim3(config.block_per_grid), dim3(config.thread_per_block), 0, context.stream(),
nthreads, input_data, output_data, output_grad_data, output_width,
output_height, input_width, input_height, ksize_width, ksize_height,
stride_width, stride_height, padding_width, padding_height,
pool_divmods, pool_process, exclusive, adaptive, input_grad_data);
}
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive,
framework::Tensor* input_grad, PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output.dims()[3] : output.dims()[1];
const int output_height =
channel_last ? output.dims()[1] : output.dims()[2];
const int output_width = channel_last ? output.dims()[2] : output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff(
input_channels, input_width, input_height, ksize_width, ksize_height,
stride_width, stride_height);
auto config = GetGpuLaunchConfig1D(context, nthreads);
hipLaunchKernelGGL(( KernelPool2DGrad<T, PoolProcess>),
dim3(config.block_per_grid), dim3(config.thread_per_block), 0, context.stream(),
nthreads, input_data, output_data, output_grad_data, output_width,
output_height, input_width, input_height, ksize_width, ksize_height,
stride_width, stride_height, padding_width, padding_height,
pool_divmods, pool_process, exclusive, adaptive, input_grad_data,
channel_last);
}
};
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
template <typename T>
class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
framework::Tensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, output_data, output_grad_data, input_channels,
input_height, input_width, output_height, output_width, ksize_height,
ksize_width, stride_height, stride_width, padding_height, padding_width,
input_grad_data, pool_divmods);
}
void operator()(
const platform::CUDADeviceContext& context,
const framework::Tensor& input, const framework::Tensor& output,
const framework::Tensor& output_grad, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& paddings,
const std::string data_format, framework::Tensor* input_grad) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output.dims()[3] : output.dims()[1];
const int output_height =
channel_last ? output.dims()[1] : output.dims()[2];
const int output_width = channel_last ? output.dims()[2] : output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelMaxPool2DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, output_data, output_grad_data, input_channels,
input_height, input_width, output_height, output_width, ksize_height,
ksize_width, stride_height, stride_width, padding_height, padding_width,
input_grad_data, pool_divmods, channel_last);
}
};
template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>,
float>;
template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>,
float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext,
paddle::platform::float16>;
template class Pool2dFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPool<float>, float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPool<float>, float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<float>,
float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<float>,
float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPool<double>, double>;
template class Pool2dFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPool<double>, double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<double>,
double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<double>,
double>;
template class Pool2dFunctor<
platform::CUDADeviceContext,
paddle::operators::math::MaxPool<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool2dFunctor<
platform::CUDADeviceContext,
paddle::operators::math::AvgPool<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool2dGradFunctor<
platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool2dGradFunctor<
platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<paddle::platform::float16>,
paddle::platform::float16>;
template <typename PoolProcess, typename T>
__global__ void KernelPool3D(
const int nthreads, const T* input_data, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw, ph, pd, c, batch_idx;
if (!channel_last) {
pw = index % output_width;
ph = (index / output_width) % output_height;
pd = (index / output_width / output_height) % output_depth;
c = (index / output_width / output_height / output_depth) % channels;
batch_idx =
index / output_width / output_height / output_depth / channels;
} else {
c = index % channels;
pw = (index / channels) % output_width;
ph = (index / channels / output_width) % output_height;
pd = (index / channels / output_width / output_height) % output_depth;
batch_idx =
index / channels / output_width / output_height / output_depth;
}
int dstart, dend;
int hstart, hend;
int wstart, wend;
if (adaptive) {
dstart = AdaptStartIndex(pd, input_depth, output_depth);
dend = AdaptEndIndex(pd, input_depth, output_depth);
hstart = AdaptStartIndex(ph, input_height, output_height);
hend = AdaptEndIndex(ph, input_height, output_height);
wstart = AdaptStartIndex(pw, input_width, output_width);
wend = AdaptEndIndex(pw, input_width, output_width);
} else {
dstart = pd * stride_depth - padding_depth;
hstart = ph * stride_height - padding_height;
wstart = pw * stride_width - padding_width;
dend = min(dstart + ksize_depth, input_depth);
hend = min(hstart + ksize_height, input_height);
wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
}
int input_data_stride;
if (!channel_last) { /* NCDHW */
input_data_stride =
(batch_idx * channels + c) * input_depth * input_height * input_width;
} else { /* NDHWC */
input_data_stride =
batch_idx * input_depth * input_height * input_width * channels;
}
input_data += input_data_stride;
T ele = pool_process.initial();
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
auto input_data_idx =
channel_last
? ((d * input_height + h) * input_width + w) * channels + c
: (d * input_height + h) * input_width + w;
pool_process.compute(input_data[input_data_idx], &ele);
}
}
}
int pool_size = (exclusive || adaptive)
? (dend - dstart) * (hend - hstart) * (wend - wstart)
: ksize_depth * ksize_height * ksize_width;
pool_process.finalize(static_cast<T>(pool_size), &ele);
output_data[index] = ele;
}
}
template <typename T, typename PoolProcess>
__global__ void KernelPool3DGrad(
const int nthreads, const T* __restrict__ input_data,
const T* __restrict__ output_data, const T* __restrict__ output_grad,
const int channels, const int input_depth, const int input_height,
const int input_width, const int output_depth, const int output_height,
const int output_width, const int ksize_depth, const int ksize_height,
const int ksize_width, const int stride_depth, const int stride_height,
const int stride_width, const int padding_depth, const int padding_height,
const int padding_width, PoolProcess pool_process, bool exclusive,
bool adaptive, T* input_grad, bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset, h_offset, d_offset, c_offset, batch_idx, output_stride;
T input = static_cast<T>(0);
if (!channel_last) { /* "NCDHW" */
w_offset = index % input_width + padding_width;
h_offset = (index / input_width) % input_height + padding_height;
d_offset =
(index / input_width / input_height) % input_depth + padding_depth;
c_offset = (index / input_width / input_height / input_depth) % channels;
batch_idx = index / input_width / input_height / input_depth / channels;
output_stride = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
} else { /* "NDHWC" */
c_offset = index % channels;
w_offset = (index / channels) % input_width + padding_width;
h_offset =
(index / channels / input_width) % input_height + padding_height;
d_offset = (index / channels / input_width / input_height) % input_depth +
padding_depth;
batch_idx = index / channels / input_width / input_height / input_depth;
output_stride =
batch_idx * output_depth * output_height * output_width * channels;
}
int pdstart, pdend;
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
pdstart = AdaptStartIndex(d_offset, output_depth, input_depth);
pdend = AdaptEndIndex(d_offset, output_depth, input_depth);
phstart = AdaptStartIndex(h_offset, output_height, input_height);
phend = AdaptEndIndex(h_offset, output_height, input_height);
pwstart = AdaptStartIndex(w_offset, output_width, input_width);
pwend = AdaptEndIndex(w_offset, output_width, input_width);
} else {
pdstart = (d_offset < ksize_depth)
? 0
: (d_offset - ksize_depth) / stride_depth + 1;
phstart = (h_offset < ksize_height)
? 0
: (h_offset - ksize_height) / stride_height + 1;
pwstart = (w_offset < ksize_width)
? 0
: (w_offset - ksize_width) / stride_width + 1;
pdend = min((d_offset) / stride_depth + 1, output_depth);
phend = min((h_offset) / stride_height + 1, output_height);
pwend = min((w_offset) / stride_width + 1, output_width);
}
if (pool_process.use_x) {
input = input_data[index];
output_data += output_stride;
}
output_grad += output_stride;
T input_grad_data = static_cast<T>(0.0);
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int pool_size;
if (adaptive) {
pool_size =
static_cast<int>(
ceil(static_cast<double>(input_depth) / ksize_depth)) *
static_cast<int>(
ceil(static_cast<double>(input_height) / ksize_height)) *
static_cast<int>(
ceil(static_cast<double>(input_width) / ksize_width));
} else {
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
pool_size =
exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart)
: ksize_depth * ksize_height * ksize_width;
}
int output_sub_idx =
channel_last
? ((pd * output_height + ph) * output_width + pw) * channels +
c_offset
: (pd * output_height + ph) * output_width + pw;
          T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                              : static_cast<T>(0);
          pool_process.compute(input, output_value,
                               output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
}
input_grad[index] = input_grad_data;
}
}
template <typename T>
__global__ void KernelMaxPool3DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, const int channels, const int input_depth,
const int input_height, const int input_width, const int output_depth,
const int output_height, const int output_width, const int ksize_depth,
const int ksize_height, const int ksize_width, const int stride_depth,
const int stride_height, const int stride_width, const int padding_depth,
const int padding_height, const int padding_width, T* input_grad,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw, ph, pd, c, batch_idx;
if (!channel_last) { /*NCDHW*/
pw = index % output_width;
ph = (index / output_width) % output_height;
pd = (index / output_width / output_height) % output_depth;
c = (index / output_width / output_height / output_depth) % channels;
batch_idx =
index / output_width / output_height / output_depth / channels;
} else { /*NDHWC*/
c = index % channels;
pw = (index / channels) % output_width;
ph = (index / channels / output_width) % output_height;
pd = (index / channels / output_width / output_height) % output_depth;
batch_idx =
index / channels / output_width / output_height / output_depth;
}
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = output_data[index];
bool stop = false;
int maxIdx = -1;
int input_stride;
if (!channel_last) {
input_stride =
(batch_idx * channels + c) * input_depth * input_height * input_width;
} else {
input_stride =
batch_idx * input_depth * input_height * input_width * channels;
}
input_data += input_stride;
input_grad += input_stride;
for (int d = dstart; d < dend && !stop; ++d) {
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
int input_data_idx =
channel_last
? ((d * input_height + h) * input_width + w) * channels + c
: (d * input_height + h) * input_width + w;
if (ele == input_data[input_data_idx]) {
stop = true;
maxIdx = input_data_idx;
}
}
}
}
if (maxIdx != -1) {
// atomic add
platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
}
}
}
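// Added note: KernelMaxPool3DGrad assigns one thread per *output* element,
// scans that output's pooling window for the first input value equal to the
// stored maximum, and scatters output_grad[index] onto that position. Since
// several output windows can share the same argmax input element, the
// scatter uses platform::CudaAtomicAdd instead of a plain store.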
template <typename PoolProcess, typename T>
void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input, const std::vector<int>& input_shape,
const std::vector<int>& output_shape, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& paddings,
bool exclusive, bool adaptive, T* output, gpuStream_t stream,
PoolProcess pool_compute) {
const int batch_size = input_shape[0];
const int input_channels = input_shape[1];
const int input_depth = input_shape[2];
const int input_height = input_shape[3];
const int input_width = input_shape[4];
const int output_channels = output_shape[1];
const int output_depth = output_shape[2];
const int output_height = output_shape[3];
const int output_width = output_shape[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
thread_num = 512;
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, stream,
nthreads, input, input_channels, input_depth, input_height, input_width,
output_depth, output_height, output_width, ksize_depth, ksize_height,
ksize_width, stride_depth, stride_height, stride_width, padding_depth,
padding_height, padding_width, pool_compute, exclusive, adaptive, output);
}
/*
* Tensors are in NCDHW or NDHWC format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
 * Paddings are six elements. These six elements represent depth_front,
* depth_back,
* height_up, height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive,
bool adaptive, framework::Tensor* output,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, input_channels, input_depth, input_height,
input_width, output_depth, output_height, output_width, ksize_depth,
ksize_height, ksize_width, stride_depth, stride_height, stride_width,
padding_depth, padding_height, padding_width, pool_process, exclusive,
adaptive, output_data);
}
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive,
framework::Tensor* output, PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output->dims()[4] : output->dims()[1];
const int output_depth =
channel_last ? output->dims()[1] : output->dims()[2];
const int output_height =
channel_last ? output->dims()[2] : output->dims()[3];
const int output_width =
channel_last ? output->dims()[3] : output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3D<PoolProcess, T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, input_channels, input_depth, input_height,
input_width, output_depth, output_height, output_width, ksize_depth,
ksize_height, ksize_width, stride_depth, stride_height, stride_width,
padding_depth, padding_height, padding_width, pool_process, exclusive,
adaptive, output_data, channel_last);
}
};
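/*
 * Usage sketch for Pool3dFunctor (illustration only; the device context `ctx`
 * and the NCDHW tensors `in` and `out` are assumed to be prepared by the
 * caller, e.g. by the pool3d op, and are not defined in this file):
 *
 *   using MaxPoolF = paddle::operators::math::MaxPool<float>;
 *   Pool3dFunctor<platform::CUDADeviceContext, MaxPoolF, float> pool3d;
 *   // 2x2x2 window, stride 2, zero padding, exclusive = true,
 *   // adaptive = false
 *   pool3d(ctx, in, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, true, false, &out,
 *          MaxPoolF());
 */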
/*
* Tensors are in NCDHW or NDHWC format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
 * Paddings are six elements. These six elements represent depth_front,
* depth_back,
* height_up, height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive,
bool adaptive, framework::Tensor* input_grad,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3DGrad<T, PoolProcess>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, output_data, output_grad_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width, pool_process, exclusive, adaptive, input_grad_data);
}
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive,
framework::Tensor* input_grad, PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output.dims()[4] : output.dims()[1];
const int output_depth = channel_last ? output.dims()[1] : output.dims()[2];
const int output_height =
channel_last ? output.dims()[2] : output.dims()[3];
const int output_width = channel_last ? output.dims()[3] : output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelPool3DGrad<T, PoolProcess>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, output_data, output_grad_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width, pool_process, exclusive, adaptive, input_grad_data,
channel_last); // add channel_last
}
};
/*
 * Tensors are in NCDHW or NDHWC format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
 * Paddings are six elements. These six elements represent depth_front,
* depth_back,
* height_up, height_down, width_left and width_right, respectively.
*/
template <class T>
class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
framework::Tensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, output_data, output_grad_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width, input_grad_data);
}
void operator()(
const platform::CUDADeviceContext& context,
const framework::Tensor& input, const framework::Tensor& output,
const framework::Tensor& output_grad, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& paddings,
const std::string data_format, framework::Tensor* input_grad) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output.dims()[4] : output.dims()[1];
const int output_depth = channel_last ? output.dims()[1] : output.dims()[2];
const int output_height =
channel_last ? output.dims()[2] : output.dims()[3];
const int output_width = channel_last ? output.dims()[3] : output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, output_data, output_grad_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width, input_grad_data, channel_last); // add channel_last
}
};
template class Pool3dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>,
float>;
template class Pool3dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>,
float>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext,
paddle::platform::float16>;
template class Pool3dFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPool<float>, float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPool<float>, float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<float>,
float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<float>,
float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPool<double>, double>;
template class Pool3dFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPool<double>, double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<double>,
double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<double>,
double>;
template class Pool3dFunctor<
platform::CUDADeviceContext,
paddle::operators::math::MaxPool<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool3dFunctor<
platform::CUDADeviceContext,
paddle::operators::math::AvgPool<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool3dGradFunctor<
platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool3dGradFunctor<
platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<paddle::platform::float16>,
paddle::platform::float16>;
template <typename T1, typename T2>
__global__ void KernelMaxPool2dWithIdx(
const int nthreads, const T1* input_data, const int channels,
const int input_height, const int input_width, const int output_height,
const int output_width, const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width, const int padding_height,
const int padding_width, bool adaptive, T1* output_data, T2* mask_data,
FastDivModForPooling divmods) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(
index, false, divmods, 0, 0, input_width, input_height, &w_offset,
&h_offset, &c_offset, &input_offset);
input_data += input_offset;
if (adaptive) {
hstart = AdaptStartIndex(h_offset, input_height, output_height);
hend = AdaptEndIndex(h_offset, input_height, output_height);
wstart = AdaptStartIndex(w_offset, input_width, output_width);
wend = AdaptEndIndex(w_offset, input_width, output_width);
} else {
hstart = h_offset * stride_height - padding_height;
hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
wstart = w_offset * stride_width - padding_width;
wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
}
T1 ele = -FLT_MAX;
int max_index = -1;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * input_width + w;
if (ele < input_data[input_index]) {
max_index = input_index;
ele = input_data[input_index];
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
template <typename T1, typename T2>
__global__ void KernelMaxPool2DWithIdxGrad(
const int nthreads, const T1* output_grad, const T2* mask_data,
const int channels, const int input_height, const int input_width,
const int output_height, const int output_width, const int ksize_height,
const int ksize_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, bool adaptive,
T1* input_grad, FastDivModForPooling divmods) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(
index, false, divmods, 0, 0, output_width, output_height, &w_offset,
&h_offset, &c_offset, &output_offset);
mask_data += output_offset;
output_grad += output_offset;
if (adaptive) {
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
} else {
phstart =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
pwstart =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
phend =
min((h_offset + padding_height) / stride_height + 1, output_height);
pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
}
T1 input_grad_data = 0;
int input_current_featuremap_idx = h_offset * input_width + w_offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
input_grad_data += output_grad[ph * output_width + pw];
}
}
input_grad[index] = input_grad_data;
}
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive,
framework::Tensor* output, framework::Tensor* mask) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace());
T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
hipLaunchKernelGGL(( KernelMaxPool2dWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, input_channels, input_height, input_width,
output_height, output_width, ksize_height, ksize_width, stride_height,
stride_width, padding_height, padding_width, adaptive, output_data,
mask_data, pool_divmods);
}
};
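/*
 * Usage sketch for MaxPool2dWithIndexFunctor (illustration only; `ctx`, `in`,
 * `out` and `mask` are assumed to be prepared by the caller). For every
 * output element the mask receives the flat argmax offset
 * h * input_width + w inside its (batch, channel) plane, which is exactly
 * what KernelMaxPool2DWithIdxGrad compares against in the backward pass:
 *
 *   MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float, int> fwd;
 *   // 3x3 window, stride 1, padding 1, adaptive = false
 *   fwd(ctx, in, {3, 3}, {1, 1}, {1, 1}, false, &out, &mask);
 */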
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive,
framework::Tensor* input_grad) {
const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1];
const int input_height = input_grad->dims()[2];
const int input_width = input_grad->dims()[3];
const int output_height = output_grad.dims()[2];
const int output_width = output_grad.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, input_width, input_height);
hipLaunchKernelGGL(( KernelMaxPool2DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, output_grad_data, mask_data, input_channels, input_height,
input_width, output_height, output_width, ksize_height, ksize_width,
stride_height, stride_width, padding_height, padding_width, adaptive,
input_grad_data, pool_divmods);
}
};
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float,
int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float,
int>;
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double,
int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext,
double, int>;
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdx(
const int nthreads, const T1* input_data, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
bool adaptive, T1* output_data, T2* mask_data) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart, dend;
int hstart, hend;
int wstart, wend;
if (adaptive) {
dstart = AdaptStartIndex(pd, input_depth, output_depth);
dend = AdaptEndIndex(pd, input_depth, output_depth);
hstart = AdaptStartIndex(ph, input_height, output_height);
hend = AdaptEndIndex(ph, input_height, output_height);
wstart = AdaptStartIndex(pw, input_width, output_width);
wend = AdaptEndIndex(pw, input_width, output_width);
} else {
dstart = pd * stride_depth - padding_depth;
hstart = ph * stride_height - padding_height;
wstart = pw * stride_width - padding_width;
dend = min(dstart + ksize_depth, input_depth);
hend = min(hstart + ksize_height, input_height);
wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
}
T1 ele = -FLT_MAX;
int max_index = -1;
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (ele < input_data[(d * input_height + h) * input_width + w]) {
max_index = (d * input_height + h) * input_width + w;
ele = input_data[max_index];
}
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad(
const int nthreads, const T1* output_grad, const T2* mask,
const int channels, const int input_depth, const int input_height,
const int input_width, const int output_depth, const int output_height,
const int output_width, const int ksize_depth, const int ksize_height,
const int ksize_width, const int stride_depth, const int stride_height,
const int stride_width, const int padding_depth, const int padding_height,
const int padding_width, bool adaptive, T1* input_grad) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
int h_offset = (index / input_width) % input_height;
int d_offset = (index / input_width / input_height) % input_depth;
int c_offset =
(index / input_width / input_height / input_depth) % channels;
int batch_idx = index / input_width / input_height / input_depth / channels;
int pdstart, pdend;
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
pdstart = d_offset * output_depth / input_depth;
pdend =
min((d_offset + 1) * output_depth / input_depth + 1, output_depth);
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
} else {
pdstart =
(d_offset + padding_depth < ksize_depth)
? 0
: (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
phstart =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
pwstart =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth);
phend =
min((h_offset + padding_height) / stride_height + 1, output_height);
pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
}
T1 input_grad_data = 0;
int input_current_feature_map_idx =
(d_offset * input_height + h_offset) * input_width + w_offset;
int output_idx = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
mask += output_idx;
output_grad += output_idx;
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[(pd * output_height + ph) * output_width + pw] ==
input_current_feature_map_idx)
input_grad_data +=
output_grad[(pd * output_height + ph) * output_width + pw];
}
}
}
input_grad[index] = input_grad_data;
}
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive,
framework::Tensor* output, framework::Tensor* mask) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace());
T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DWithIdx<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, input_data, input_channels, input_depth, input_height,
input_width, output_depth, output_height, output_width, ksize_depth,
ksize_height, ksize_width, stride_depth, stride_height, stride_width,
padding_depth, padding_height, padding_width, adaptive, output_data,
mask_data);
}
};
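// Added note: the 3D variant mirrors the 2D with-index functor above; the
// mask stores the flat offset (d * input_height + h) * input_width + w of
// the argmax inside each (batch, channel) volume, which is what
// KernelMaxPool3DWithIdxGrad compares against when routing gradients.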
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive,
framework::Tensor* input_grad) {
const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1];
const int input_depth = input_grad->dims()[2];
const int input_height = input_grad->dims()[3];
const int input_width = input_grad->dims()[4];
const int output_depth = output_grad.dims()[2];
const int output_height = output_grad.dims()[3];
const int output_width = output_grad.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T1* output_grad_data = output_grad.data<T1>();
const T2* mask_data = mask.data<T2>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxPool3DWithIdxGrad<T1, T2>), dim3(grid), dim3(threads), 0, context.stream(),
nthreads, output_grad_data, mask_data, input_channels, input_depth,
input_height, input_width, output_depth, output_height, output_width,
ksize_depth, ksize_height, ksize_width, stride_depth, stride_height,
stride_width, padding_depth, padding_height, padding_width, adaptive,
input_grad_data);
}
};
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float,
int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float,
int>;
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double,
int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext,
double, int>;
} // namespace math
} // namespace operators
} // namespace paddle
|
60ddc7cbf37751c839553ddb0a47be07c93cf031.cu
|
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/operators/math/pooling.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/fast_divmod.h"
namespace paddle {
namespace operators {
namespace math {
struct FastDivModForPooling {
public:
platform::FastDivMod channel;
platform::FastDivMod width;
platform::FastDivMod height;
explicit HOSTDEVICE FastDivModForPooling(const int channels,
const int output_width,
const int output_height) {
channel = platform::FastDivMod(channels);
width = platform::FastDivMod(output_width);
height = platform::FastDivMod(output_height);
}
};
struct FastDivModForPoolingWithMoreStaff {
public:
platform::FastDivMod channel;
platform::FastDivMod width;
platform::FastDivMod height;
platform::FastDivMod ksize_w;
platform::FastDivMod ksize_h;
platform::FastDivMod stride_w;
platform::FastDivMod stride_h;
explicit HOSTDEVICE FastDivModForPoolingWithMoreStaff(
const int channels, const int input_width, const int input_height,
const int ksize_width, const int ksize_height, const int stride_width,
const int stride_height) {
channel = platform::FastDivMod(channels);
width = platform::FastDivMod(input_width);
height = platform::FastDivMod(input_height);
ksize_w = platform::FastDivMod(ksize_width);
ksize_h = platform::FastDivMod(ksize_height);
stride_w = platform::FastDivMod(stride_width);
stride_h = platform::FastDivMod(stride_height);
}
};
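// Added note: both structs above pre-build platform::FastDivMod helpers so
// the kernels can avoid repeated runtime integer division. As used below,
// Divmod(i) returns the quotient in val[0] and the remainder in val[1], and
// `divisor` exposes the original divisor (e.g. divmods.channel.divisor).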
template <typename FastDivModForPooling>
__device__ void OffsetPreparationFor4Dimension(
int index, bool channel_last, FastDivModForPooling divmods,
const int pad_width, const int pad_height, const int aux_width,
const int aux_height, int* w_offset, int* h_offset, int* c_offset,
int* stride) {
if (!channel_last) { /* NCHW */
auto input_width_divmod = divmods.width.Divmod(index);
auto input_height_divmod = divmods.height.Divmod(input_width_divmod.val[0]);
auto channel_divmod = divmods.channel.Divmod(input_height_divmod.val[0]);
*w_offset = input_width_divmod.val[1] + pad_width;
*h_offset = input_height_divmod.val[1] + pad_height;
*c_offset = channel_divmod.val[1];
*stride = (channel_divmod.val[0] * divmods.channel.divisor + *c_offset) *
aux_height * aux_width;
} else { /* NHWC */
auto c_divmod = divmods.channel.Divmod(index);
auto input_width_divmod = divmods.width.Divmod(c_divmod.val[0]);
auto input_height_divmod = divmods.height.Divmod(input_width_divmod.val[0]);
*c_offset = c_divmod.val[1];
*w_offset = input_width_divmod.val[1] + pad_width;
*h_offset = input_height_divmod.val[1] + pad_height;
*stride = input_height_divmod.val[0] * aux_height * aux_width *
divmods.channel.divisor;
}
}
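// Worked example (added for illustration): with divmods built as
// FastDivModForPooling(channels = 3, width = 5, height = 4), zero pads and
// index = 37, the NCHW branch above yields *w_offset = 37 % 5 = 2,
// *h_offset = (37 / 5) % 4 = 3, *c_offset = (37 / 20) % 3 = 1, and
// *stride = ((37 / 60) * 3 + 1) * aux_height * aux_width, i.e. the offset of
// that (batch, channel) plane inside the aux_height x aux_width tensor the
// caller indexes next.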
template <typename PoolProcess, typename T>
__global__ void KernelPool2D(
const int nthreads, const T* input_data, const int channels,
const int input_height, const int input_width, const int output_height,
const int output_width, const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width, const int padding_height,
const int padding_width, FastDivModForPooling divmods,
PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data,
bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(
index, channel_last, divmods, 0, 0, input_width, input_height,
&w_offset, &h_offset, &c_offset, &input_offset);
input_data += input_offset;
if (adaptive) {
hstart = AdaptStartIndex(h_offset, input_height, output_height);
hend = AdaptEndIndex(h_offset, input_height, output_height);
wstart = AdaptStartIndex(w_offset, input_width, output_width);
wend = AdaptEndIndex(w_offset, input_width, output_width);
} else {
hstart = h_offset * stride_height - padding_height;
hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
wstart = w_offset * stride_width - padding_width;
wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
}
T ele = pool_process.initial();
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
auto input_idx = channel_last
? (h * input_width + w) * channels + c_offset
: h * input_width + w;
pool_process.compute(input_data[input_idx], &ele);
}
}
int pool_size = (exclusive || adaptive) ? (hend - hstart) * (wend - wstart)
: ksize_height * ksize_width;
pool_process.finalize(static_cast<T>(pool_size), &ele);
output_data[index] = ele;
}
}
template <typename T, typename PoolProcess>
__global__ void KernelPool2DGrad(
const int nthreads, const T* __restrict__ input_data,
    const T* __restrict__ output_data, const T* __restrict__ output_grad,
const int output_width, const int output_height, const int input_width,
const int input_height, const int ksize_width, const int ksize_height,
const int stride_width, const int stride_height, const int padding_width,
const int padding_height, FastDivModForPoolingWithMoreStaff divmods,
PoolProcess pool_process, bool exclusive, bool adaptive,
T* __restrict__ input_grad, bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
T input = static_cast<T>(0);
T input_grad_data = static_cast<T>(0);
int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<>(index, channel_last, divmods,
padding_width, padding_height,
output_width, output_height, &w_offset,
&h_offset, &c_offset, &output_offset);
if (pool_process.use_x) {
input = input_data[index];
output_data += output_offset;
}
output_grad += output_offset;
if (adaptive) {
auto tmp_phend = divmods.height.Divmod((h_offset + 1) * output_height);
auto tmp_pwend = divmods.width.Divmod((w_offset + 1) * output_width);
phstart = divmods.height.Div(h_offset * output_height);
pwstart = divmods.width.Div(w_offset * output_width);
phend = tmp_phend.val[1] > 0 ? tmp_phend.val[0] + 1 : tmp_phend.val[0];
pwend = tmp_pwend.val[1] > 0 ? tmp_pwend.val[0] + 1 : tmp_pwend.val[0];
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
auto ksize_w_divmod = divmods.ksize_w.Divmod(input_width);
auto ksize_h_divmod = divmods.ksize_h.Divmod(input_height);
auto tmp_width = ksize_w_divmod.val[1] > 0 ? ksize_w_divmod.val[0] + 1
: ksize_w_divmod.val[0];
auto tmp_height = ksize_h_divmod.val[1] > 0
? ksize_h_divmod.val[0] + 1
: ksize_h_divmod.val[0];
int pool_size = tmp_height * tmp_width;
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
          T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                              : static_cast<T>(0);
          pool_process.compute(input, output_value,
                               output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
} else {
auto stride_height_div = divmods.stride_h.Div(h_offset - ksize_height);
auto stride_width_div = divmods.stride_w.Div(w_offset - ksize_width);
phstart = (h_offset < ksize_height) ? 0 : stride_height_div + 1;
pwstart = (w_offset < ksize_width) ? 0 : stride_width_div + 1;
phend = min(divmods.stride_h.Div(h_offset) + 1, output_height);
pwend = min(divmods.stride_w.Div(w_offset) + 1, output_width);
if (exclusive) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
int pool_size = (hend - hstart) * (wend - wstart);
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
            T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                                : static_cast<T>(0);
            pool_process.compute(
                input, output_value, output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size), &input_grad_data);
}
}
} else {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int pool_size = ksize_height * ksize_width;
int tmp_idx = ph * output_width + pw;
int output_sub_idx =
channel_last ? tmp_idx * divmods.channel.divisor + c_offset
: tmp_idx;
            T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                                : static_cast<T>(0);
            pool_process.compute(
                input, output_value, output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size), &input_grad_data);
}
}
}
}
input_grad[index] = input_grad_data;
}
}
template <typename T>
__global__ void KernelMaxPool2DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, const int channels, const int input_height,
const int input_width, const int output_height, const int output_width,
const int ksize_height, const int ksize_width, const int stride_height,
const int stride_width, const int padding_height, const int padding_width,
T* input_grad, FastDivModForPooling divmods, bool channel_last = false) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(
index, channel_last, divmods, 0, 0, input_width, input_height,
&w_offset, &h_offset, &c_offset, &input_offset);
input_data += input_offset;
input_grad += input_offset;
int hstart = h_offset * stride_height - padding_height;
int hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
int wstart = w_offset * stride_width - padding_width;
int wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
T ele = output_data[index];
int maxIndex = -1;
bool stop = false;
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
int input_data_idx = channel_last
? (h * input_width + w) * channels + c_offset
: h * input_width + w;
if (ele == input_data[input_data_idx]) {
maxIndex = input_data_idx;
stop = true;
}
}
}
if (maxIndex != -1) {
// atomic add
platform::CudaAtomicAdd(input_grad + maxIndex, output_grad[index]);
}
}
}
template <typename PoolProcess, typename T>
void Pool2dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input, const std::vector<int>& input_shape,
const std::vector<int>& output_shape, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& paddings,
bool exclusive, bool adaptive, T* output, gpuStream_t stream,
PoolProcess pool_compute) {
const int batch_size = input_shape[0];
const int input_channels = input_shape[1];
const int input_height = input_shape[2];
const int input_width = input_shape[3];
const int output_channels = output_shape[1];
const int output_height = output_shape[2];
const int output_width = output_shape[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
// platform::ChangeThreadNum(context, &thread_num);
thread_num = 512;
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelPool2D<PoolProcess, T><<<grid, threads, 0, stream>>>(
nthreads, input, input_channels, input_height, input_width, output_height,
output_width, ksize_height, ksize_width, stride_height, stride_width,
padding_height, padding_width, pool_divmods, pool_compute, exclusive,
adaptive, output);
}
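// Added note on the launch configuration above: one thread produces one
// output element. For example, with batch_size = 8, 3 channels and a 28 x 28
// output, nthreads = 8 * 3 * 28 * 28 = 18816, so with thread_num = 1024 the
// grid gets (18816 + 1023) / 1024 = 19 blocks; the surplus threads of the
// last block simply fail the loop condition inside KernelPool2D.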
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive,
bool adaptive, framework::Tensor* output,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_height, input_width,
output_height, output_width, ksize_height, ksize_width, stride_height,
stride_width, padding_height, padding_width, pool_divmods, pool_process,
exclusive, adaptive, output_data);
}
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive,
framework::Tensor* output, PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output->dims()[3] : output->dims()[1];
const int output_height =
channel_last ? output->dims()[1] : output->dims()[2];
const int output_width =
channel_last ? output->dims()[2] : output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelPool2D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_height, input_width,
output_height, output_width, ksize_height, ksize_width, stride_height,
stride_width, padding_height, padding_width, pool_divmods, pool_process,
exclusive, adaptive, output_data, channel_last);
}
};
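/*
 * Usage sketch for the data_format-aware overload of Pool2dFunctor
 * (illustration only; `ctx`, `in` and `out` are assumed to be prepared by the
 * caller and are not defined in this file):
 *
 *   using AvgPoolF = paddle::operators::math::AvgPool<float>;
 *   Pool2dFunctor<platform::CUDADeviceContext, AvgPoolF, float> pool2d;
 *   // NHWC layout, 2x2 window, stride 2, zero padding, exclusive = true,
 *   // adaptive = false
 *   pool2d(ctx, in, {2, 2}, {2, 2}, {0, 0}, "NHWC", true, false, &out,
 *          AvgPoolF());
 */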
/*
* Tensors are in NCHW or NHWC format.
* Ksize, strides are two elements. These two elements represent height
* and width, respectively.
* Paddings are four elements. These four elements represent height_up,
* height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, typename T>
class Pool2dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive,
bool adaptive, framework::Tensor* input_grad,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff(
input_channels, input_width, input_height, ksize_width, ksize_height,
stride_width, stride_height);
auto config = GetGpuLaunchConfig1D(context, nthreads);
KernelPool2DGrad<T, PoolProcess><<<
config.block_per_grid, config.thread_per_block, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, output_width,
output_height, input_width, input_height, ksize_width, ksize_height,
stride_width, stride_height, padding_width, padding_height,
pool_divmods, pool_process, exclusive, adaptive, input_grad_data);
}
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive,
framework::Tensor* input_grad, PoolProcess pool_process) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output.dims()[3] : output.dims()[1];
const int output_height =
channel_last ? output.dims()[1] : output.dims()[2];
const int output_width = channel_last ? output.dims()[2] : output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
auto pool_divmods = FastDivModForPoolingWithMoreStaff(
input_channels, input_width, input_height, ksize_width, ksize_height,
stride_width, stride_height);
auto config = GetGpuLaunchConfig1D(context, nthreads);
KernelPool2DGrad<T, PoolProcess><<<
config.block_per_grid, config.thread_per_block, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, output_width,
output_height, input_width, input_height, ksize_width, ksize_height,
stride_width, stride_height, padding_width, padding_height,
pool_divmods, pool_process, exclusive, adaptive, input_grad_data,
channel_last);
}
};
/*
* Tensors are in NCHW or NHWC format.
 * Ksize and strides each have two elements, representing height and width,
 * respectively.
 * Paddings are four elements, representing height_up, height_down,
 * width_left and width_right, respectively.
*/
template <typename T>
class MaxPool2dGradFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
framework::Tensor* input_grad) {
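    // NCHW max-pool backward: one thread per *output* element
    // (nthreads = N * C * H_out * W_out); KernelMaxPool2DGrad routes each
    // output gradient back to the input position that produced the maximum.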
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels,
input_height, input_width, output_height, output_width, ksize_height,
ksize_width, stride_height, stride_width, padding_height, padding_width,
input_grad_data, pool_divmods);
}
void operator()(
const platform::CUDADeviceContext& context,
const framework::Tensor& input, const framework::Tensor& output,
const framework::Tensor& output_grad, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& paddings,
const std::string data_format, framework::Tensor* input_grad) {
bool channel_last = (data_format == "NHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[3] : input.dims()[1];
const int input_height = channel_last ? input.dims()[1] : input.dims()[2];
const int input_width = channel_last ? input.dims()[2] : input.dims()[3];
const int output_channels =
channel_last ? output.dims()[3] : output.dims()[1];
const int output_height =
channel_last ? output.dims()[1] : output.dims()[2];
const int output_width = channel_last ? output.dims()[2] : output.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelMaxPool2DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels,
input_height, input_width, output_height, output_width, ksize_height,
ksize_width, stride_height, stride_width, padding_height, padding_width,
input_grad_data, pool_divmods, channel_last);
}
};
template class Pool2dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>,
float>;
template class Pool2dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>,
float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext, double>;
template class MaxPool2dGradFunctor<platform::CUDADeviceContext,
paddle::platform::float16>;
template class Pool2dFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPool<float>, float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPool<float>, float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<float>,
float>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<float>,
float>;
template class Pool2dFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPool<double>, double>;
template class Pool2dFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPool<double>, double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<double>,
double>;
template class Pool2dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<double>,
double>;
template class Pool2dFunctor<
platform::CUDADeviceContext,
paddle::operators::math::MaxPool<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool2dFunctor<
platform::CUDADeviceContext,
paddle::operators::math::AvgPool<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool2dGradFunctor<
platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool2dGradFunctor<
platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<paddle::platform::float16>,
paddle::platform::float16>;
template <typename PoolProcess, typename T>
__global__ void KernelPool3D(
const int nthreads, const T* input_data, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
PoolProcess pool_process, bool exclusive, bool adaptive, T* output_data,
bool channel_last = false) {
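  // Grid-stride loop: each thread produces one output element, decoding its
  // (batch, channel, depth, height, width) coordinates from the flat index
  // according to the NCDHW / NDHWC layout.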
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw, ph, pd, c, batch_idx;
if (!channel_last) {
pw = index % output_width;
ph = (index / output_width) % output_height;
pd = (index / output_width / output_height) % output_depth;
c = (index / output_width / output_height / output_depth) % channels;
batch_idx =
index / output_width / output_height / output_depth / channels;
} else {
c = index % channels;
pw = (index / channels) % output_width;
ph = (index / channels / output_width) % output_height;
pd = (index / channels / output_width / output_height) % output_depth;
batch_idx =
index / channels / output_width / output_height / output_depth;
}
int dstart, dend;
int hstart, hend;
int wstart, wend;
if (adaptive) {
dstart = AdaptStartIndex(pd, input_depth, output_depth);
dend = AdaptEndIndex(pd, input_depth, output_depth);
hstart = AdaptStartIndex(ph, input_height, output_height);
hend = AdaptEndIndex(ph, input_height, output_height);
wstart = AdaptStartIndex(pw, input_width, output_width);
wend = AdaptEndIndex(pw, input_width, output_width);
} else {
dstart = pd * stride_depth - padding_depth;
hstart = ph * stride_height - padding_height;
wstart = pw * stride_width - padding_width;
dend = min(dstart + ksize_depth, input_depth);
hend = min(hstart + ksize_height, input_height);
wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
}
int input_data_stride;
if (!channel_last) { /* NCDHW */
input_data_stride =
(batch_idx * channels + c) * input_depth * input_height * input_width;
} else { /* NDHWC */
input_data_stride =
batch_idx * input_depth * input_height * input_width * channels;
}
input_data += input_data_stride;
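    // Accumulate over the pooling window with the configured operation;
    // finalize() then applies any required scaling (e.g. 1 / pool_size for
    // average pooling).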
T ele = pool_process.initial();
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
auto input_data_idx =
channel_last
? ((d * input_height + h) * input_width + w) * channels + c
: (d * input_height + h) * input_width + w;
pool_process.compute(input_data[input_data_idx], &ele);
}
}
}
int pool_size = (exclusive || adaptive)
? (dend - dstart) * (hend - hstart) * (wend - wstart)
: ksize_depth * ksize_height * ksize_width;
pool_process.finalize(static_cast<T>(pool_size), &ele);
output_data[index] = ele;
}
}
template <typename T, typename PoolProcess>
__global__ void KernelPool3DGrad(
const int nthreads, const T* __restrict__ input_data,
const T* __restrict__ output_data, const T* __restrict__ output_grad,
const int channels, const int input_depth, const int input_height,
const int input_width, const int output_depth, const int output_height,
const int output_width, const int ksize_depth, const int ksize_height,
const int ksize_width, const int stride_depth, const int stride_height,
const int stride_width, const int padding_depth, const int padding_height,
const int padding_width, PoolProcess pool_process, bool exclusive,
bool adaptive, T* input_grad, bool channel_last = false) {
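  // Backward kernel: one thread per input element. It enumerates every
  // output position whose pooling window contains this element and
  // accumulates that output's gradient share.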
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset, h_offset, d_offset, c_offset, batch_idx, output_stride;
T input = static_cast<T>(0);
if (!channel_last) { /* "NCDHW" */
w_offset = index % input_width + padding_width;
h_offset = (index / input_width) % input_height + padding_height;
d_offset =
(index / input_width / input_height) % input_depth + padding_depth;
c_offset = (index / input_width / input_height / input_depth) % channels;
batch_idx = index / input_width / input_height / input_depth / channels;
output_stride = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
} else { /* "NDHWC" */
c_offset = index % channels;
w_offset = (index / channels) % input_width + padding_width;
h_offset =
(index / channels / input_width) % input_height + padding_height;
d_offset = (index / channels / input_width / input_height) % input_depth +
padding_depth;
batch_idx = index / channels / input_width / input_height / input_depth;
output_stride =
batch_idx * output_depth * output_height * output_width * channels;
}
int pdstart, pdend;
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
pdstart = AdaptStartIndex(d_offset, output_depth, input_depth);
pdend = AdaptEndIndex(d_offset, output_depth, input_depth);
phstart = AdaptStartIndex(h_offset, output_height, input_height);
phend = AdaptEndIndex(h_offset, output_height, input_height);
pwstart = AdaptStartIndex(w_offset, output_width, input_width);
pwend = AdaptEndIndex(w_offset, output_width, input_width);
} else {
pdstart = (d_offset < ksize_depth)
? 0
: (d_offset - ksize_depth) / stride_depth + 1;
phstart = (h_offset < ksize_height)
? 0
: (h_offset - ksize_height) / stride_height + 1;
pwstart = (w_offset < ksize_width)
? 0
: (w_offset - ksize_width) / stride_width + 1;
pdend = min((d_offset) / stride_depth + 1, output_depth);
phend = min((h_offset) / stride_height + 1, output_height);
pwend = min((w_offset) / stride_width + 1, output_width);
}
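    // [pdstart, pdend) x [phstart, phend) x [pwstart, pwend) spans all output
    // positions whose pooling windows contain this input element.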
if (pool_process.use_x) {
input = input_data[index];
output_data += output_stride;
}
output_grad += output_stride;
T input_grad_data = static_cast<T>(0.0);
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int pool_size;
if (adaptive) {
pool_size =
static_cast<int>(
ceil(static_cast<double>(input_depth) / ksize_depth)) *
static_cast<int>(
ceil(static_cast<double>(input_height) / ksize_height)) *
static_cast<int>(
ceil(static_cast<double>(input_width) / ksize_width));
} else {
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
pool_size =
exclusive ? (dend - dstart) * (hend - hstart) * (wend - wstart)
: ksize_depth * ksize_height * ksize_width;
}
int output_sub_idx =
channel_last
? ((pd * output_height + ph) * output_width + pw) * channels +
c_offset
: (pd * output_height + ph) * output_width + pw;
          T output_value = pool_process.use_x ? output_data[output_sub_idx]
                                              : static_cast<T>(0);
          pool_process.compute(input, output_value, output_grad[output_sub_idx],
static_cast<T>(1.0 / pool_size),
&input_grad_data);
}
}
}
input_grad[index] = input_grad_data;
}
}
template <typename T>
__global__ void KernelMaxPool3DGrad(
const int nthreads, const T* input_data, const T* output_data,
const T* output_grad, const int channels, const int input_depth,
const int input_height, const int input_width, const int output_depth,
const int output_height, const int output_width, const int ksize_depth,
const int ksize_height, const int ksize_width, const int stride_depth,
const int stride_height, const int stride_width, const int padding_depth,
const int padding_height, const int padding_width, T* input_grad,
bool channel_last = false) {
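  // Max-pool backward: one thread per output element. Re-scan the window to
  // find the element equal to the pooled maximum, then scatter the gradient
  // to that position.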
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw, ph, pd, c, batch_idx;
if (!channel_last) { /*NCDHW*/
pw = index % output_width;
ph = (index / output_width) % output_height;
pd = (index / output_width / output_height) % output_depth;
c = (index / output_width / output_height / output_depth) % channels;
batch_idx =
index / output_width / output_height / output_depth / channels;
} else { /*NDHWC*/
c = index % channels;
pw = (index / channels) % output_width;
ph = (index / channels / output_width) % output_height;
pd = (index / channels / output_width / output_height) % output_depth;
batch_idx =
index / channels / output_width / output_height / output_depth;
}
int dstart = pd * stride_depth - padding_depth;
int hstart = ph * stride_height - padding_height;
int wstart = pw * stride_width - padding_width;
int dend = min(dstart + ksize_depth, input_depth);
int hend = min(hstart + ksize_height, input_height);
int wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T ele = output_data[index];
bool stop = false;
int maxIdx = -1;
int input_stride;
if (!channel_last) {
input_stride =
(batch_idx * channels + c) * input_depth * input_height * input_width;
} else {
input_stride =
batch_idx * input_depth * input_height * input_width * channels;
}
input_data += input_stride;
input_grad += input_stride;
for (int d = dstart; d < dend && !stop; ++d) {
for (int h = hstart; h < hend && !stop; ++h) {
for (int w = wstart; w < wend && !stop; ++w) {
int input_data_idx =
channel_last
? ((d * input_height + h) * input_width + w) * channels + c
: (d * input_height + h) * input_width + w;
if (ele == input_data[input_data_idx]) {
stop = true;
maxIdx = input_data_idx;
}
}
}
}
if (maxIdx != -1) {
      // Pooling windows can overlap, so accumulate into the argmax position
      // with an atomic add.
platform::CudaAtomicAdd(input_grad + maxIdx, output_grad[index]);
}
}
}
template <typename PoolProcess, typename T>
void Pool3dDirectCUDAFunctor<PoolProcess, T>::operator()(
const T* input, const std::vector<int>& input_shape,
const std::vector<int>& output_shape, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& paddings,
bool exclusive, bool adaptive, T* output, gpuStream_t stream,
PoolProcess pool_compute) {
const int batch_size = input_shape[0];
const int input_channels = input_shape[1];
const int input_depth = input_shape[2];
const int input_height = input_shape[3];
const int input_width = input_shape[4];
const int output_channels = output_shape[1];
const int output_depth = output_shape[2];
const int output_height = output_shape[3];
const int output_width = output_shape[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
thread_num = 512;
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
KernelPool3D<PoolProcess, T><<<grid, threads, 0, stream>>>(
nthreads, input, input_channels, input_depth, input_height, input_width,
output_depth, output_height, output_width, ksize_depth, ksize_height,
ksize_width, stride_depth, stride_height, stride_width, padding_depth,
padding_height, padding_width, pool_compute, exclusive, adaptive, output);
}
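// Illustrative usage sketch (not part of the original source; the pointers,
// shapes and stream are assumed to be set up by the caller):
//   Pool3dDirectCUDAFunctor<MaxPool<float>, float> pool3d;
//   pool3d(d_in, in_shape, out_shape, ksize, strides, paddings,
//          /*exclusive=*/true, /*adaptive=*/false, d_out, stream,
//          MaxPool<float>());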
/*
* Tensors are in NCDHW or NDHWC format.
 * Ksize and strides each have three elements, representing depth, height
 * and width, respectively.
 * Paddings are six elements, representing depth_front, depth_back,
 * height_up, height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dFunctor<platform::CUDADeviceContext, PoolProcess, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive,
bool adaptive, framework::Tensor* output,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_depth, input_height,
input_width, output_depth, output_height, output_width, ksize_depth,
ksize_height, ksize_width, stride_depth, stride_height, stride_width,
padding_depth, padding_height, padding_width, pool_process, exclusive,
adaptive, output_data);
}
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive,
framework::Tensor* output, PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output->dims()[4] : output->dims()[1];
const int output_depth =
channel_last ? output->dims()[1] : output->dims()[2];
const int output_height =
channel_last ? output->dims()[2] : output->dims()[3];
const int output_width =
channel_last ? output->dims()[3] : output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
KernelPool3D<PoolProcess, T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_depth, input_height,
input_width, output_depth, output_height, output_width, ksize_depth,
ksize_height, ksize_width, stride_depth, stride_height, stride_width,
padding_depth, padding_height, padding_width, pool_process, exclusive,
adaptive, output_data, channel_last);
}
};
/*
* Tensors are in NCDHW or NDHWC format.
 * Ksize and strides each have three elements, representing depth, height
 * and width, respectively.
 * Paddings are six elements, representing depth_front, depth_back,
 * height_up, height_down, width_left and width_right, respectively.
*/
template <typename PoolProcess, class T>
class Pool3dGradFunctor<platform::CUDADeviceContext, PoolProcess, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool exclusive,
bool adaptive, framework::Tensor* input_grad,
PoolProcess pool_process) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelPool3DGrad<T, PoolProcess><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width, pool_process, exclusive, adaptive, input_grad_data);
}
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::string data_format, bool exclusive, bool adaptive,
framework::Tensor* input_grad, PoolProcess pool_process) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output.dims()[4] : output.dims()[1];
const int output_depth = channel_last ? output.dims()[1] : output.dims()[2];
const int output_height =
channel_last ? output.dims()[2] : output.dims()[3];
const int output_width = channel_last ? output.dims()[3] : output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelPool3DGrad<T, PoolProcess><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width, pool_process, exclusive, adaptive, input_grad_data,
        channel_last);
}
};
/*
 * Tensors are in NCDHW or NDHWC format.
 * Ksize and strides each have three elements, representing depth, height
 * and width, respectively.
 * Paddings are six elements, representing depth_front, depth_back,
 * height_up, height_down, width_left and width_right, respectively.
*/
template <class T>
class MaxPool3dGradFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output,
const framework::Tensor& output_grad,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings,
framework::Tensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
padding_width, input_grad_data);
}
void operator()(
const platform::CUDADeviceContext& context,
const framework::Tensor& input, const framework::Tensor& output,
const framework::Tensor& output_grad, const std::vector<int>& ksize,
const std::vector<int>& strides, const std::vector<int>& paddings,
const std::string data_format, framework::Tensor* input_grad) {
bool channel_last = (data_format == "NDHWC");
const int batch_size = input.dims()[0];
const int input_channels = channel_last ? input.dims()[4] : input.dims()[1];
const int input_depth = channel_last ? input.dims()[1] : input.dims()[2];
const int input_height = channel_last ? input.dims()[2] : input.dims()[3];
const int input_width = channel_last ? input.dims()[3] : input.dims()[4];
const int output_channels =
channel_last ? output.dims()[4] : output.dims()[1];
const int output_depth = channel_last ? output.dims()[1] : output.dims()[2];
const int output_height =
channel_last ? output.dims()[2] : output.dims()[3];
const int output_width = channel_last ? output.dims()[3] : output.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DGrad<T><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, output_data, output_grad_data, input_channels,
input_depth, input_height, input_width, output_depth, output_height,
output_width, ksize_depth, ksize_height, ksize_width, stride_depth,
stride_height, stride_width, padding_depth, padding_height,
        padding_width, input_grad_data, channel_last);
}
};
template class Pool3dDirectCUDAFunctor<paddle::operators::math::MaxPool<float>,
float>;
template class Pool3dDirectCUDAFunctor<paddle::operators::math::AvgPool<float>,
float>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, float>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext, double>;
template class MaxPool3dGradFunctor<platform::CUDADeviceContext,
paddle::platform::float16>;
template class Pool3dFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPool<float>, float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPool<float>, float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<float>,
float>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<float>,
float>;
template class Pool3dFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPool<double>, double>;
template class Pool3dFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPool<double>, double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<double>,
double>;
template class Pool3dGradFunctor<platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<double>,
double>;
template class Pool3dFunctor<
platform::CUDADeviceContext,
paddle::operators::math::MaxPool<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool3dFunctor<
platform::CUDADeviceContext,
paddle::operators::math::AvgPool<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool3dGradFunctor<
platform::CUDADeviceContext,
paddle::operators::math::MaxPoolGrad<paddle::platform::float16>,
paddle::platform::float16>;
template class Pool3dGradFunctor<
platform::CUDADeviceContext,
paddle::operators::math::AvgPoolGrad<paddle::platform::float16>,
paddle::platform::float16>;
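// The *WithIdx kernels below additionally record, for every pooled output,
// the flat offset of the chosen input element within its feature map; the
// corresponding backward kernels consume this mask instead of re-scanning
// the pooling window.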
template <typename T1, typename T2>
__global__ void KernelMaxPool2dWithIdx(
const int nthreads, const T1* input_data, const int channels,
const int input_height, const int input_width, const int output_height,
const int output_width, const int ksize_height, const int ksize_width,
const int stride_height, const int stride_width, const int padding_height,
const int padding_width, bool adaptive, T1* output_data, T2* mask_data,
FastDivModForPooling divmods) {
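  // Forward: each thread computes one pooled output and stores the argmax
  // (the flat h * W + w offset within the input feature map) in mask_data.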
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int hstart, hend, wstart, wend;
int w_offset, h_offset, c_offset, input_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(
index, false, divmods, 0, 0, input_width, input_height, &w_offset,
&h_offset, &c_offset, &input_offset);
input_data += input_offset;
if (adaptive) {
hstart = AdaptStartIndex(h_offset, input_height, output_height);
hend = AdaptEndIndex(h_offset, input_height, output_height);
wstart = AdaptStartIndex(w_offset, input_width, output_width);
wend = AdaptEndIndex(w_offset, input_width, output_width);
} else {
hstart = h_offset * stride_height - padding_height;
hend = min(hstart + ksize_height, input_height);
hstart = max(hstart, 0);
wstart = w_offset * stride_width - padding_width;
wend = min(wstart + ksize_width, input_width);
wstart = max(wstart, 0);
}
T1 ele = -FLT_MAX;
int max_index = -1;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * input_width + w;
if (ele < input_data[input_index]) {
max_index = input_index;
ele = input_data[input_index];
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
template <typename T1, typename T2>
__global__ void KernelMaxPool2DWithIdxGrad(
const int nthreads, const T1* output_grad, const T2* mask_data,
const int channels, const int input_height, const int input_width,
const int output_height, const int output_width, const int ksize_height,
const int ksize_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, bool adaptive,
T1* input_grad, FastDivModForPooling divmods) {
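  // Backward: each thread handles one input element and sums the gradients
  // of every output whose recorded argmax equals this element's offset.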
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int phstart, phend, pwstart, pwend;
int w_offset, h_offset, c_offset, output_offset;
OffsetPreparationFor4Dimension<FastDivModForPooling>(
index, false, divmods, 0, 0, output_width, output_height, &w_offset,
&h_offset, &c_offset, &output_offset);
mask_data += output_offset;
output_grad += output_offset;
if (adaptive) {
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
} else {
phstart =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
pwstart =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
phend =
min((h_offset + padding_height) / stride_height + 1, output_height);
pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
}
T1 input_grad_data = 0;
int input_current_featuremap_idx = h_offset * input_width + w_offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_data[ph * output_width + pw] == input_current_featuremap_idx)
input_grad_data += output_grad[ph * output_width + pw];
}
}
input_grad[index] = input_grad_data;
}
}
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive,
framework::Tensor* output, framework::Tensor* mask) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace());
T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
int nthreads = batch_size * output_channels * output_height * output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, output_width, output_height);
KernelMaxPool2dWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_height, input_width,
output_height, output_width, ksize_height, ksize_width, stride_height,
stride_width, padding_height, padding_width, adaptive, output_data,
mask_data, pool_divmods);
}
};
/*
* All tensors are in NCHW format.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive,
framework::Tensor* input_grad) {
const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1];
const int input_height = input_grad->dims()[2];
const int input_width = input_grad->dims()[3];
const int output_height = output_grad.dims()[2];
const int output_width = output_grad.dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const T2* mask_data = mask.data<T2>();
const T1* output_grad_data = output_grad.data<T1>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
int nthreads = batch_size * input_channels * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
auto pool_divmods =
FastDivModForPooling(input_channels, input_width, input_height);
KernelMaxPool2DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads, output_grad_data, mask_data, input_channels, input_height,
input_width, output_height, output_width, ksize_height, ksize_width,
stride_height, stride_width, padding_height, padding_width, adaptive,
input_grad_data, pool_divmods);
}
};
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, float,
int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext, float,
int>;
template class MaxPool2dWithIndexFunctor<platform::CUDADeviceContext, double,
int>;
template class MaxPool2dWithIndexGradFunctor<platform::CUDADeviceContext,
double, int>;
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdx(
const int nthreads, const T1* input_data, const int channels,
const int input_depth, const int input_height, const int input_width,
const int output_depth, const int output_height, const int output_width,
const int ksize_depth, const int ksize_height, const int ksize_width,
const int stride_depth, const int stride_height, const int stride_width,
const int padding_depth, const int padding_height, const int padding_width,
bool adaptive, T1* output_data, T2* mask_data) {
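  // Forward: each thread computes one pooled output and stores the argmax as
  // the flat (d * H + h) * W + w offset within the input feature map.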
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int pw = index % output_width;
int ph = (index / output_width) % output_height;
int pd = (index / output_width / output_height) % output_depth;
int c = (index / output_width / output_height / output_depth) % channels;
int batch_idx =
index / output_width / output_height / output_depth / channels;
int dstart, dend;
int hstart, hend;
int wstart, wend;
if (adaptive) {
dstart = AdaptStartIndex(pd, input_depth, output_depth);
dend = AdaptEndIndex(pd, input_depth, output_depth);
hstart = AdaptStartIndex(ph, input_height, output_height);
hend = AdaptEndIndex(ph, input_height, output_height);
wstart = AdaptStartIndex(pw, input_width, output_width);
wend = AdaptEndIndex(pw, input_width, output_width);
} else {
dstart = pd * stride_depth - padding_depth;
hstart = ph * stride_height - padding_height;
wstart = pw * stride_width - padding_width;
dend = min(dstart + ksize_depth, input_depth);
hend = min(hstart + ksize_height, input_height);
wend = min(wstart + ksize_width, input_width);
dstart = max(dstart, 0);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
}
T1 ele = -FLT_MAX;
int max_index = -1;
input_data +=
(batch_idx * channels + c) * input_depth * input_height * input_width;
for (int d = dstart; d < dend; ++d) {
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (ele < input_data[(d * input_height + h) * input_width + w]) {
max_index = (d * input_height + h) * input_width + w;
ele = input_data[max_index];
}
}
}
}
output_data[index] = ele;
mask_data[index] = max_index;
}
}
template <typename T1, typename T2>
__global__ void KernelMaxPool3DWithIdxGrad(
const int nthreads, const T1* output_grad, const T2* mask,
const int channels, const int input_depth, const int input_height,
const int input_width, const int output_depth, const int output_height,
const int output_width, const int ksize_depth, const int ksize_height,
const int ksize_width, const int stride_depth, const int stride_height,
const int stride_width, const int padding_depth, const int padding_height,
const int padding_width, bool adaptive, T1* input_grad) {
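  // Backward: each thread handles one input element and accumulates the
  // gradients of all outputs whose stored argmax matches this element.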
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
index += blockDim.x * gridDim.x) {
int w_offset = index % input_width;
int h_offset = (index / input_width) % input_height;
int d_offset = (index / input_width / input_height) % input_depth;
int c_offset =
(index / input_width / input_height / input_depth) % channels;
int batch_idx = index / input_width / input_height / input_depth / channels;
int pdstart, pdend;
int phstart, phend;
int pwstart, pwend;
if (adaptive) {
pdstart = d_offset * output_depth / input_depth;
pdend =
min((d_offset + 1) * output_depth / input_depth + 1, output_depth);
phstart = h_offset * output_height / input_height;
phend =
min((h_offset + 1) * output_height / input_height + 1, output_height);
pwstart = w_offset * output_width / input_width;
pwend =
min((w_offset + 1) * output_width / input_width + 1, output_width);
} else {
pdstart =
(d_offset + padding_depth < ksize_depth)
? 0
: (d_offset + padding_depth - ksize_depth) / stride_depth + 1;
phstart =
(h_offset + padding_height < ksize_height)
? 0
: (h_offset + padding_height - ksize_height) / stride_height + 1;
pwstart =
(w_offset + padding_width < ksize_width)
? 0
: (w_offset + padding_width - ksize_width) / stride_width + 1;
pdend = min((d_offset + padding_depth) / stride_depth + 1, output_depth);
phend =
min((h_offset + padding_height) / stride_height + 1, output_height);
pwend = min((w_offset + padding_width) / stride_width + 1, output_width);
}
T1 input_grad_data = 0;
int input_current_feature_map_idx =
(d_offset * input_height + h_offset) * input_width + w_offset;
int output_idx = (batch_idx * channels + c_offset) * output_depth *
output_height * output_width;
mask += output_idx;
output_grad += output_idx;
for (int pd = pdstart; pd < pdend; ++pd) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[(pd * output_height + ph) * output_width + pw] ==
input_current_feature_map_idx)
input_grad_data +=
output_grad[(pd * output_height + ph) * output_width + pw];
}
}
}
input_grad[index] = input_grad_data;
}
}
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, T1, T2> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive,
framework::Tensor* output, framework::Tensor* mask) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T1* input_data = input.data<T1>();
T1* output_data = output->mutable_data<T1>(context.GetPlace());
T2* mask_data = mask->mutable_data<T2>(context.GetPlace());
int nthreads = batch_size * output_channels * output_depth * output_height *
output_width;
int thread_num = 1024;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(context, &thread_num);
#endif
int blocks = (nthreads + thread_num - 1) / thread_num;
dim3 threads(thread_num, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DWithIdx<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads, input_data, input_channels, input_depth, input_height,
input_width, output_depth, output_height, output_width, ksize_depth,
ksize_height, ksize_width, stride_depth, stride_height, stride_width,
padding_depth, padding_height, padding_width, adaptive, output_data,
mask_data);
}
};
/*
* All tensors are in NCDHW format.
* Ksize, strides, paddings are three elements. These three elements represent
* depth, height and width, respectively.
*/
template <typename T1, typename T2>
class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, T1, T2> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& output_grad,
const framework::Tensor& mask, const std::vector<int>& ksize,
const std::vector<int>& strides,
const std::vector<int>& paddings, bool adaptive,
framework::Tensor* input_grad) {
const int batch_size = input_grad->dims()[0];
const int input_channels = input_grad->dims()[1];
const int input_depth = input_grad->dims()[2];
const int input_height = input_grad->dims()[3];
const int input_width = input_grad->dims()[4];
const int output_depth = output_grad.dims()[2];
const int output_height = output_grad.dims()[3];
const int output_width = output_grad.dims()[4];
const int ksize_depth = ksize[0];
const int ksize_height = ksize[1];
const int ksize_width = ksize[2];
const int stride_depth = strides[0];
const int stride_height = strides[1];
const int stride_width = strides[2];
const int padding_depth = paddings[0];
const int padding_height = paddings[1];
const int padding_width = paddings[2];
const T1* output_grad_data = output_grad.data<T1>();
const T2* mask_data = mask.data<T2>();
T1* input_grad_data = input_grad->mutable_data<T1>(context.GetPlace());
int nthreads =
batch_size * input_channels * input_depth * input_height * input_width;
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxPool3DWithIdxGrad<T1, T2><<<grid, threads, 0, context.stream()>>>(
nthreads, output_grad_data, mask_data, input_channels, input_depth,
input_height, input_width, output_depth, output_height, output_width,
ksize_depth, ksize_height, ksize_width, stride_depth, stride_height,
stride_width, padding_depth, padding_height, padding_width, adaptive,
input_grad_data);
}
};
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, float,
int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext, float,
int>;
template class MaxPool3dWithIndexFunctor<platform::CUDADeviceContext, double,
int>;
template class MaxPool3dWithIndexGradFunctor<platform::CUDADeviceContext,
double, int>;
} // namespace math
} // namespace operators
} // namespace paddle
|
4836b7530a3e3071108089bd23c809e903167069.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
// TODO: __global__
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
printf("TODO\n");
}
}
}
|
4836b7530a3e3071108089bd23c809e903167069.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
// TODO: __global__
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO
printf("TODO\n");
}
}
}
|
8bff91777995cf4216757e48542f5f8fa8b6e201.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include "THHDeviceUtils.cuh"
#include <algorithm> // for std::min
__global__ void THCudaTensor_kernel_indexFill(
float *tensor, long* stride, float *index, long src_nDim,
int dim, long idx_size, long tensor_size, long size_dim, float val
)
{
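  // Each thread owns one position of the flattened non-`dim` dimensions and
  // writes `val` at every index selected along `dim`. Indices are 1-based
  // (Torch/Lua convention), hence the `- 1` below.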
int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
long flat_size = tensor_size / idx_size;
if (thread_idx < flat_size)
{
long coeff = 0;
for (int i=0; i<idx_size; i++)
{
int leftover = thread_idx;
int srcIdx = 0;
for (int d=0; d<src_nDim; d++)
{
if (d < dim)
{
coeff = leftover / (stride[d] / size_dim);
leftover -= coeff * (stride[d] / size_dim);
srcIdx += coeff * stride[d];
}
else if (d > dim)
{
coeff = leftover / stride[d];
leftover -= coeff * stride[d];
srcIdx += coeff * stride[d];
}
}
tensor[srcIdx + (int)((index[i])-1)*stride[dim]] = val;
}
}
}
__global__ void THCudaTensor_kernel_indexCopy(
float *res, float *src, long* res_stride, float *index,
long res_nDim, int dim, long idx_size, long src_size, long size_dim
)
{
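  // Same flattening as indexFill above, but copies slice i of `src` into the
  // slice of `res` selected by the 1-based index[i] along `dim`.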
int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
long flat_size = src_size / idx_size;
if (thread_idx < flat_size)
{
long coeff = 0;
for (int i=0; i<idx_size; i++)
{
int leftover = thread_idx;
int targetIdx = 0;
int resIdx = 0;
for (int d=0; d<res_nDim; d++)
{
if (d < dim)
{
long stride_d = res_stride[d] / size_dim;
coeff = leftover / stride_d;
leftover -= coeff * stride_d;
targetIdx += coeff * stride_d * idx_size;
resIdx += coeff * res_stride[d];
}
else if (d > dim)
{
coeff = leftover / res_stride[d];
leftover -= coeff * res_stride[d];
targetIdx += coeff * res_stride[d];
resIdx += coeff * res_stride[d];
}
}
res[resIdx + ((int)(index[i])-1)*res_stride[dim]] = src[targetIdx + i*res_stride[dim]];
}
}
}
void THCudaTensor_indexCopy_long(THCState *state, THCudaTensor *res_, int dim, THLongTensor *indices, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 1, res_));
THCudaTensor *indices_ = THCudaTensor_newWithSize1d(state, indices->size[0]);
THCudaTensor_copyLong(state, indices_, indices);
THCudaTensor_indexCopy(state, res_, dim, indices_, src);
THCudaTensor_free(state, indices_);
}
void THCudaTensor_indexCopy(THCState *state, THCudaTensor *res_, int dim, THCudaTensor *indices, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, res_, src));
long *stride_;
long nIndex = indices->size[0];
long nRes;
THArgCheck(indices->nDimension == 1, 3, "expecting vector of indices");
THArgCheck(dim < src->nDimension, 4, "Indexing dim is out of bounds");
THArgCheck(src->nDimension > 0, 2, "Source tensor is empty");
THArgCheck(nIndex == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
src = THCudaTensor_newContiguous(state, src);
indices = THCudaTensor_newContiguous(state, indices);
nRes = THCudaTensor_nElement(state, res_);
dim3 nthreads(16, 16);
dim3 nblocks(ceil((float)nRes / nIndex / (16*16)));
THCudaCheck(THCudaMalloc(state, (void**)&stride_, res_->nDimension * sizeof(long)));
THCudaCheck(hipMemcpy(stride_, res_->stride, res_->nDimension * sizeof(long), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( THCudaTensor_kernel_indexCopy), dim3(nblocks), dim3(nthreads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, res_), THCudaTensor_data(state, src),
stride_, THCudaTensor_data(state, indices),
res_->nDimension, dim, nIndex,
THCudaTensor_nElement(state, src), res_->size[dim]
);
THCudaCheck(THCudaFree(state, stride_));
THCudaTensor_free(state, indices);
THCudaTensor_free(state, src);
}
void THCudaTensor_indexFill_long(THCState *state, THCudaTensor *res_, int dim, THLongTensor *indices, float val)
{
THAssert(THCudaTensor_checkGPU(state, 1, res_));
THCudaTensor *indices_ = THCudaTensor_newWithSize1d(state, indices->size[0]);
THCudaTensor_copyLong(state, indices_, indices);
THCudaTensor_indexFill(state, res_, dim, indices_, val);
THCudaTensor_free(state, indices_);
}
void THCudaTensor_indexFill(THCState *state, THCudaTensor *res_, int dim, THCudaTensor *indices, float val)
{
THAssert(THCudaTensor_checkGPU(state, 1, res_));
long *stride_;
long nIndex = indices->size[0];
long nRes;
THArgCheck(indices->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < res_->nDimension,4,"Indexing dim is out of bounds");
THArgCheck(res_->nDimension > 0, 2, "Source tensor is empty");
nRes = THCudaTensor_nElement(state, res_) / res_->size[dim] * nIndex;
indices = THCudaTensor_newContiguous(state, indices);
dim3 nthreads(16, 16);
dim3 nblocks(ceil((float)nRes / nIndex / (16*16)));
THCudaCheck(THCudaMalloc(state, (void**)&stride_, res_->nDimension * sizeof(long)));
THCudaCheck(hipMemcpy(stride_, res_->stride, res_->nDimension * sizeof(long), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( THCudaTensor_kernel_indexFill), dim3(nblocks), dim3(nthreads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, res_), stride_, THCudaTensor_data(state, indices),
res_->nDimension, dim, nIndex, nRes, res_->size[dim], val
);
THCudaCheck(THCudaFree(state, stride_));
THCudaTensor_free(state, indices);
}
__global__ void THCudaTensor_kernel_indexSelect_contiguous(
float *tensor, float *src, long stride, float *index, long idxSize)
{
// In the typical case, each block of 128 threads handles a 4x128
// section of the output with each warp handling a single 1x128 row.
// The outer loops handle inputs larger than 4*65535 or strides larger
// than 128*65535.
const int VT = 4;
const int WARP_SIZE = 32;
const int MAX_DIM_SIZE = 65535;
for (int idx = blockIdx.x * blockDim.y + threadIdx.y; idx < idxSize; idx += blockDim.y * MAX_DIM_SIZE) {
for (int startIdx = threadIdx.x + blockIdx.y * VT*WARP_SIZE; startIdx < stride; startIdx += VT*WARP_SIZE*MAX_DIM_SIZE) {
const int srcIdx = ((int) index[idx] - 1) * stride;
const int targetIdx = idx * stride;
#pragma unroll
for (int i = 0; i < VT; i++) {
const int featureIdx = startIdx + i * WARP_SIZE;
if (featureIdx < stride) {
tensor[targetIdx + featureIdx] = src[srcIdx + featureIdx];
}
}
}
}
}
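// General (non-contiguous) indexSelect: one thread per destination element.
// Each thread unravels its linear destination index into per-dimension
// coordinates, replaces the coordinate along indexDim with the (1-based)
// entry read from `indices`, rebuilds the matching source offset and copies
// a single element.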
__global__ void THCudaTensor_kernel_indexSelect(
TensorInfo<unsigned long> dest,
TensorInfo<unsigned long> src,
TensorInfo<unsigned long> indices,
long totalSize,
int indexDim
)
{
for (unsigned long linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
unsigned long destOffset =
IndexToOffset<unsigned long, -1>::get(linearIndex, dest);
// Calculate the index in the dimension we're selecting in.
unsigned long offset = destOffset;
// In the process of doing so, we'll calculate indices in all lower
// dimensions. We need to save these so we can reconstruct a linear index in
// the source tensor. MAX_CUTORCH_DIMS is usually an overestimate (we only
// need to save dims - indexDim indices) but this avoids dynamic memory
// allocation.
unsigned long unraveledIndices[MAX_CUTORCH_DIMS];
for (int i = dest.dims - 1; i >= indexDim; --i) {
unraveledIndices[i] = offset % dest.sizes[i];
offset /= dest.sizes[i];
}
unsigned long destSliceIndex = unraveledIndices[indexDim];
unsigned long destSliceOffset =
IndexToOffset<unsigned long, 1>::get(destSliceIndex, indices);
unsigned long srcSliceIndex =
(unsigned long)indices.data[destSliceOffset] - 1;
// Rebuild index in the source tensor by doing the reverse of the above
unsigned long srcIndex = offset * src.sizes[indexDim] + srcSliceIndex;
for (int i = indexDim + 1; i < dest.dims; ++i) {
srcIndex = srcIndex * src.sizes[i] + unraveledIndices[i];
}
unsigned long srcOffset =
IndexToOffset<unsigned long, -1>::get(srcIndex, src);
dest.data[destOffset] = src.data[srcOffset];
}
}
void THCudaTensor_indexSelect_long(THCState *state, THCudaTensor *res_, THCudaTensor *src, int dim, THLongTensor *indices)
{
THAssert(THCudaTensor_checkGPU(state, 2, res_, src));
THCudaTensor *indices_ = THCudaTensor_newWithSize1d(state, indices->size[0]);
THCudaTensor_copyLong(state, indices_, indices);
THCudaTensor_indexSelect(state, res_, src, dim, indices_);
THCudaTensor_free(state, indices_);
}
void THCudaTensor_indexSelect(THCState *state, THCudaTensor *res, THCudaTensor *src, int dim, THCudaTensor *indices)
{
THAssert(THCudaTensor_checkGPU(state, 2, res, src));
THCCheckTensorDims(state, res, 2);
THCCheckTensorDims(state, src, 3);
THCCheckTensorDims(state, indices, 5);
long nIndex = THCudaTensor_size(state, indices, 0);
long srcDims = THCudaTensor_nDimension(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(newSize, dim, nIndex);
THCudaTensor_resize(state, res, newSize, NULL);
THLongStorage_free(newSize);
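// Fast path: with contiguous tensors and selection along dimension 0, every
// selected slice is a contiguous block of `stride` floats, so a simple
// strided copy kernel suffices. Otherwise fall through to the generic
// TensorInfo-based kernel below.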
if (THCudaTensor_isContiguous(state, src) &&
THCudaTensor_isContiguous(state, res) &&
THCudaTensor_isContiguous(state, indices) &&
dim == 0)
{
long stride = THCudaTensor_stride(state, src, 0);
int blockX = ::min(THCCeilDiv(nIndex, 4L), 65535L);
int blockY = ::min(THCCeilDiv(stride, 128L), 65535L);
dim3 nthreads(32, 4);
dim3 nblocks(blockX, blockY);
hipLaunchKernelGGL(( THCudaTensor_kernel_indexSelect_contiguous), dim3(nblocks), dim3(nthreads), 0, stream,
THCudaTensor_data(state, res),
THCudaTensor_data(state, src),
stride,
THCudaTensor_data(state, indices),
nIndex);
return;
}
long nRes = THCudaTensor_nElement(state, res);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
dim3 nthreads(::min(nRes, 128L));
dim3 nblocks(::min(THCCeilDiv(nRes, 128L), (long)(mpc * 8)));
TensorInfo<unsigned long> destInfo(state, res, NoCollapseDims);
TensorInfo<unsigned long> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned long> indicesInfo(state, indices);
hipLaunchKernelGGL(( THCudaTensor_kernel_indexSelect), dim3(nthreads), dim3(nblocks), 0, stream,
destInfo,
srcInfo,
indicesInfo,
nRes,
dim
);
}
|
8bff91777995cf4216757e48542f5f8fa8b6e201.cu
|
#include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include "THCDeviceUtils.cuh"
#include <algorithm> // for std::min
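// indexFill: each thread owns one position of the slice orthogonal to `dim`
// (tensor_size / idx_size positions in total) and, for every (1-based) entry
// of `index`, recomputes the strided offset of that position and writes `val`.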
__global__ void THCudaTensor_kernel_indexFill(
float *tensor, long* stride, float *index, long src_nDim,
int dim, long idx_size, long tensor_size, long size_dim, float val
)
{
int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
long flat_size = tensor_size / idx_size;
if (thread_idx < flat_size)
{
long coeff = 0;
for (int i=0; i<idx_size; i++)
{
int leftover = thread_idx;
int srcIdx = 0;
for (int d=0; d<src_nDim; d++)
{
if (d < dim)
{
coeff = leftover / (stride[d] / size_dim);
leftover -= coeff * (stride[d] / size_dim);
srcIdx += coeff * stride[d];
}
else if (d > dim)
{
coeff = leftover / stride[d];
leftover -= coeff * stride[d];
srcIdx += coeff * stride[d];
}
}
tensor[srcIdx + (int)((index[i])-1)*stride[dim]] = val;
}
}
}
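// indexCopy: each thread owns one position of the slice orthogonal to `dim`
// and, for every (1-based) entry of `index`, copies the i-th slice of the
// contiguous `src` into the slice of `res` selected by index[i].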
__global__ void THCudaTensor_kernel_indexCopy(
float *res, float *src, long* res_stride, float *index,
long res_nDim, int dim, long idx_size, long src_size, long size_dim
)
{
int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
long flat_size = src_size / idx_size;
if (thread_idx < flat_size)
{
long coeff = 0;
for (int i=0; i<idx_size; i++)
{
int leftover = thread_idx;
int targetIdx = 0;
int resIdx = 0;
for (int d=0; d<res_nDim; d++)
{
if (d < dim)
{
long stride_d = res_stride[d] / size_dim;
coeff = leftover / stride_d;
leftover -= coeff * stride_d;
targetIdx += coeff * stride_d * idx_size;
resIdx += coeff * res_stride[d];
}
else if (d > dim)
{
coeff = leftover / res_stride[d];
leftover -= coeff * res_stride[d];
targetIdx += coeff * res_stride[d];
resIdx += coeff * res_stride[d];
}
}
res[resIdx + ((int)(index[i])-1)*res_stride[dim]] = src[targetIdx + i*res_stride[dim]];
}
}
}
void THCudaTensor_indexCopy_long(THCState *state, THCudaTensor *res_, int dim, THLongTensor *indices, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 1, res_));
THCudaTensor *indices_ = THCudaTensor_newWithSize1d(state, indices->size[0]);
THCudaTensor_copyLong(state, indices_, indices);
THCudaTensor_indexCopy(state, res_, dim, indices_, src);
THCudaTensor_free(state, indices_);
}
void THCudaTensor_indexCopy(THCState *state, THCudaTensor *res_, int dim, THCudaTensor *indices, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, res_, src));
long *stride_;
long nIndex = indices->size[0];
long nRes;
THArgCheck(indices->nDimension == 1, 3, "expecting vector of indices");
THArgCheck(dim < src->nDimension, 4, "Indexing dim is out of bounds");
THArgCheck(src->nDimension > 0, 2, "Source tensor is empty");
THArgCheck(nIndex == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
src = THCudaTensor_newContiguous(state, src);
indices = THCudaTensor_newContiguous(state, indices);
nRes = THCudaTensor_nElement(state, res_);
dim3 nthreads(16, 16);
dim3 nblocks(ceil((float)nRes / nIndex / (16*16)));
THCudaCheck(THCudaMalloc(state, (void**)&stride_, res_->nDimension * sizeof(long)));
THCudaCheck(cudaMemcpy(stride_, res_->stride, res_->nDimension * sizeof(long), cudaMemcpyHostToDevice));
THCudaTensor_kernel_indexCopy<<<nblocks, nthreads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, res_), THCudaTensor_data(state, src),
stride_, THCudaTensor_data(state, indices),
res_->nDimension, dim, nIndex,
THCudaTensor_nElement(state, src), res_->size[dim]
);
THCudaCheck(THCudaFree(state, stride_));
THCudaTensor_free(state, indices);
THCudaTensor_free(state, src);
}
void THCudaTensor_indexFill_long(THCState *state, THCudaTensor *res_, int dim, THLongTensor *indices, float val)
{
THAssert(THCudaTensor_checkGPU(state, 1, res_));
THCudaTensor *indices_ = THCudaTensor_newWithSize1d(state, indices->size[0]);
THCudaTensor_copyLong(state, indices_, indices);
THCudaTensor_indexFill(state, res_, dim, indices_, val);
THCudaTensor_free(state, indices_);
}
void THCudaTensor_indexFill(THCState *state, THCudaTensor *res_, int dim, THCudaTensor *indices, float val)
{
THAssert(THCudaTensor_checkGPU(state, 1, res_));
long *stride_;
long nIndex = indices->size[0];
long nRes;
THArgCheck(indices->nDimension == 1, 3, "Index is supposed to be a vector");
THArgCheck(dim < res_->nDimension,4,"Indexing dim is out of bounds");
THArgCheck(res_->nDimension > 0, 2, "Source tensor is empty");
nRes = THCudaTensor_nElement(state, res_) / res_->size[dim] * nIndex;
indices = THCudaTensor_newContiguous(state, indices);
dim3 nthreads(16, 16);
dim3 nblocks(ceil((float)nRes / nIndex / (16*16)));
THCudaCheck(THCudaMalloc(state, (void**)&stride_, res_->nDimension * sizeof(long)));
THCudaCheck(cudaMemcpy(stride_, res_->stride, res_->nDimension * sizeof(long), cudaMemcpyHostToDevice));
THCudaTensor_kernel_indexFill<<<nblocks, nthreads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, res_), stride_, THCudaTensor_data(state, indices),
res_->nDimension, dim, nIndex, nRes, res_->size[dim], val
);
THCudaCheck(THCudaFree(state, stride_));
THCudaTensor_free(state, indices);
}
__global__ void THCudaTensor_kernel_indexSelect_contiguous(
float *tensor, float *src, long stride, float *index, long idxSize)
{
// In the typical case, each block of 128 threads handles a 4x128
// section of the output with each warp handling a single 1x128 row.
// The outer loops handle inputs larger than 4*65535 or strides larger
// than 128*65535.
const int VT = 4;
const int WARP_SIZE = 32;
const int MAX_DIM_SIZE = 65535;
for (int idx = blockIdx.x * blockDim.y + threadIdx.y; idx < idxSize; idx += blockDim.y * MAX_DIM_SIZE) {
for (int startIdx = threadIdx.x + blockIdx.y * VT*WARP_SIZE; startIdx < stride; startIdx += VT*WARP_SIZE*MAX_DIM_SIZE) {
const int srcIdx = ((int) index[idx] - 1) * stride;
const int targetIdx = idx * stride;
#pragma unroll
for (int i = 0; i < VT; i++) {
const int featureIdx = startIdx + i * WARP_SIZE;
if (featureIdx < stride) {
tensor[targetIdx + featureIdx] = src[srcIdx + featureIdx];
}
}
}
}
}
__global__ void THCudaTensor_kernel_indexSelect(
TensorInfo<unsigned long> dest,
TensorInfo<unsigned long> src,
TensorInfo<unsigned long> indices,
long totalSize,
int indexDim
)
{
for (unsigned long linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
unsigned long destOffset =
IndexToOffset<unsigned long, -1>::get(linearIndex, dest);
// Calculate the index in the dimension we're selecting in.
unsigned long offset = destOffset;
// In the process of doing so, we'll calculate indices in all lower
// dimensions. We need to save these so we can reconstruct a linear index in
// the source tensor. MAX_CUTORCH_DIMS is usually an overestimate (we only
// need to save dims - indexDim indices) but this avoids dynamic memory
// allocation.
unsigned long unraveledIndices[MAX_CUTORCH_DIMS];
for (int i = dest.dims - 1; i >= indexDim; --i) {
unraveledIndices[i] = offset % dest.sizes[i];
offset /= dest.sizes[i];
}
unsigned long destSliceIndex = unraveledIndices[indexDim];
unsigned long destSliceOffset =
IndexToOffset<unsigned long, 1>::get(destSliceIndex, indices);
unsigned long srcSliceIndex =
(unsigned long)indices.data[destSliceOffset] - 1;
// Rebuild index in the source tensor by doing the reverse of the above
unsigned long srcIndex = offset * src.sizes[indexDim] + srcSliceIndex;
for (int i = indexDim + 1; i < dest.dims; ++i) {
srcIndex = srcIndex * src.sizes[i] + unraveledIndices[i];
}
unsigned long srcOffset =
IndexToOffset<unsigned long, -1>::get(srcIndex, src);
dest.data[destOffset] = src.data[srcOffset];
}
}
void THCudaTensor_indexSelect_long(THCState *state, THCudaTensor *res_, THCudaTensor *src, int dim, THLongTensor *indices)
{
THAssert(THCudaTensor_checkGPU(state, 2, res_, src));
THCudaTensor *indices_ = THCudaTensor_newWithSize1d(state, indices->size[0]);
THCudaTensor_copyLong(state, indices_, indices);
THCudaTensor_indexSelect(state, res_, src, dim, indices_);
THCudaTensor_free(state, indices_);
}
void THCudaTensor_indexSelect(THCState *state, THCudaTensor *res, THCudaTensor *src, int dim, THCudaTensor *indices)
{
THAssert(THCudaTensor_checkGPU(state, 2, res, src));
THCCheckTensorDims(state, res, 2);
THCCheckTensorDims(state, src, 3);
THCCheckTensorDims(state, indices, 5);
long nIndex = THCudaTensor_size(state, indices, 0);
long srcDims = THCudaTensor_nDimension(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(newSize, dim, nIndex);
THCudaTensor_resize(state, res, newSize, NULL);
THLongStorage_free(newSize);
if (THCudaTensor_isContiguous(state, src) &&
THCudaTensor_isContiguous(state, res) &&
THCudaTensor_isContiguous(state, indices) &&
dim == 0)
{
long stride = THCudaTensor_stride(state, src, 0);
int blockX = std::min(THCCeilDiv(nIndex, 4L), 65535L);
int blockY = std::min(THCCeilDiv(stride, 128L), 65535L);
dim3 nthreads(32, 4);
dim3 nblocks(blockX, blockY);
THCudaTensor_kernel_indexSelect_contiguous<<<nblocks, nthreads, 0, stream>>>(
THCudaTensor_data(state, res),
THCudaTensor_data(state, src),
stride,
THCudaTensor_data(state, indices),
nIndex);
return;
}
long nRes = THCudaTensor_nElement(state, res);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
dim3 nthreads(std::min(nRes, 128L));
dim3 nblocks(std::min(THCCeilDiv(nRes, 128L), (long)(mpc * 8)));
TensorInfo<unsigned long> destInfo(state, res, NoCollapseDims);
TensorInfo<unsigned long> srcInfo(state, src, NoCollapseDims);
TensorInfo<unsigned long> indicesInfo(state, indices);
THCudaTensor_kernel_indexSelect<<<nthreads, nblocks, 0, stream>>>(
destInfo,
srcInfo,
indicesInfo,
nRes,
dim
);
}
|
ed193caf0b56a6546104964c3a7e90318f72a925.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "solvers/adam_solver.h"
#include "core/common_cu.h"
#include "nodes/variable.h"
#include <glog/logging.h>
__global__
void AdamFillKernel(const int n, const float value, float *dst)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
dst[i] = value;
}
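// Adam update per element:
// m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
// v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2
// w_t = w_{t-1} - lr_t * m_t / (sqrt(v_t) + eps)
// lr_t arrives already bias-corrected from the host (see corrected_lr in
// apply()); when dry_run is set only the moment estimates m and v are updated.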
__global__
void AdamKernel(const int n, float *w, const float *g, float *m, float *v, const float beta1, const float beta2, const float eps, const float learning_rate, const bool dry_run)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
float gi = g[i];
float mi = m[i] = m[i] * beta1 + gi*(1 - beta1);
float vi = v[i] = v[i] * beta2 + gi*gi*(1 - beta2);
if (!dry_run)
w[i] -= learning_rate * mi / (sqrt(vi) + eps);
}
}
AdamSolver::AdamSolver(deepflow::SolverParam *param) : Solver(param) {
LOG_IF(FATAL, param->has_adam_solver() == false) << "param.has_adam_solver() == false";
_my_param = param->mutable_adam_solver();
_learning_rate = param->learning_rate();
}
void AdamSolver::apply(std::shared_ptr<Variable> var) {
auto context = var->executionContext();
bool verbos = (context && context->debug_level > 2) ? true : false;
bool dry_run = false;
if (_initialized == false) {
LOG_IF(INFO, verbos) << "solver " << name() << " for variable " << var->name();
init(var);
dry_run = true;
}
if (!_enabled)
return;
auto size = var->output(0)->value()->size();
double beta1 = _my_param->beta1();
double beta2 = _my_param->beta2();
double iter = context->current_iteration + 1;
float corrected_lr = (float)((double)_learning_rate * std::sqrt(1.0 - pow(beta2, iter)) / (1.0 - pow(beta1, iter)));
LOG_IF(INFO, verbos) << "applying solver " << name() << " on " << var->name() << " | lr: " << corrected_lr;
AdamKernel << <numOfBlocks(size), maxThreadsPerBlock, 0 >> > (size, (float*)var->output(0)->value()->gpu_data(), (float*)var->gradients(), _m, _v, _my_param->beta1(), _my_param->beta2(), _my_param->eps(), corrected_lr, dry_run);
DF_KERNEL_CHECK();
var->reset_gradients();
}
void AdamSolver::init(std::shared_ptr<Variable> var) {
auto size = var->output(0)->value()->size();
auto sizeInBytes = var->output(0)->value()->bytes();
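// Note: the first moment _m is zero-initialized, while the second moment _v
// is filled with 1 below; the first apply() call after init() runs as a dry
// run that only refreshes the moment estimates.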
DF_CUDA_CHECK(hipMalloc(&_m, sizeInBytes));
DF_CUDA_CHECK(hipMemset(_m, 0, sizeInBytes));
DF_CUDA_CHECK(hipMalloc(&_v, sizeInBytes));
AdamFillKernel << < numOfBlocks(size), maxThreadsPerBlock >> > (size, 1, _v);
DF_KERNEL_CHECK();
_initialized = true;
}
std::string AdamSolver::to_cpp() const
{
std::string cpp = "auto " + name() + " = df.adam_solver(";
cpp += std::to_string(_param->learning_rate()) + ", ";
cpp += std::to_string(_my_param->beta1()) + ", ";
cpp += std::to_string(_my_param->beta2()) + ", ";
cpp += std::to_string(_my_param->eps()) + ", ";
cpp += "\"" + name() + "\"";
cpp += ");";
return cpp;
}
|
ed193caf0b56a6546104964c3a7e90318f72a925.cu
|
#include "solvers/adam_solver.h"
#include "core/common_cu.h"
#include "nodes/variable.h"
#include <glog/logging.h>
__global__
void AdamFillKernel(const int n, const float value, float *dst)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n)
dst[i] = value;
}
__global__
void AdamKernel(const int n, float *w, const float *g, float *m, float *v, const float beta1, const float beta2, const float eps, const float learning_rate, const bool dry_run)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) {
float gi = g[i];
float mi = m[i] = m[i] * beta1 + gi*(1 - beta1);
float vi = v[i] = v[i] * beta2 + gi*gi*(1 - beta2);
if (!dry_run)
w[i] -= learning_rate * mi / (sqrt(vi) + eps);
}
}
AdamSolver::AdamSolver(deepflow::SolverParam *param) : Solver(param) {
LOG_IF(FATAL, param->has_adam_solver() == false) << "param.has_adam_solver() == false";
_my_param = param->mutable_adam_solver();
_learning_rate = param->learning_rate();
}
void AdamSolver::apply(std::shared_ptr<Variable> var) {
auto context = var->executionContext();
bool verbos = (context && context->debug_level > 2) ? true : false;
bool dry_run = false;
if (_initialized == false) {
LOG_IF(INFO, verbos) << "solver " << name() << " for variable " << var->name();
init(var);
dry_run = true;
}
if (!_enabled)
return;
auto size = var->output(0)->value()->size();
double beta1 = _my_param->beta1();
double beta2 = _my_param->beta2();
double iter = context->current_iteration + 1;
float corrected_lr = (float)((double)_learning_rate * std::sqrt(1.0 - pow(beta2, iter)) / (1.0 - pow(beta1, iter)));
LOG_IF(INFO, verbos) << "applying solver " << name() << " on " << var->name() << " | lr: " << corrected_lr;
AdamKernel << <numOfBlocks(size), maxThreadsPerBlock, 0 >> > (size, (float*)var->output(0)->value()->gpu_data(), (float*)var->gradients(), _m, _v, _my_param->beta1(), _my_param->beta2(), _my_param->eps(), corrected_lr, dry_run);
DF_KERNEL_CHECK();
var->reset_gradients();
}
void AdamSolver::init(std::shared_ptr<Variable> var) {
auto size = var->output(0)->value()->size();
auto sizeInBytes = var->output(0)->value()->bytes();
DF_CUDA_CHECK(cudaMalloc(&_m, sizeInBytes));
DF_CUDA_CHECK(cudaMemset(_m, 0, sizeInBytes));
DF_CUDA_CHECK(cudaMalloc(&_v, sizeInBytes));
AdamFillKernel << < numOfBlocks(size), maxThreadsPerBlock >> > (size, 1, _v);
DF_KERNEL_CHECK();
_initialized = true;
}
std::string AdamSolver::to_cpp() const
{
std::string cpp = "auto " + name() + " = df.adam_solver(";
cpp += std::to_string(_param->learning_rate()) + ", ";
cpp += std::to_string(_my_param->beta1()) + ", ";
cpp += std::to_string(_my_param->beta2()) + ", ";
cpp += std::to_string(_my_param->eps()) + ", ";
cpp += "\"" + name() + "\"";
cpp += ");";
return cpp;
}
|
5effdb66bdf600ebec3cd23c9d0ce5fbec20d424.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TILE_DIM 32
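// Block-wide dot product of A and B using shared-memory tiles of TILE_DIM
// elements. Assumes a one-dimensional block of exactly TILE_DIM threads:
// each thread accumulates one lane of the tile across the whole vector and
// thread 0 reduces the per-lane partial sums into result[0].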
template<typename T>
__device__ void vectorDotVector(const T* A, const T* B, T* result, const int length) {
__shared__ T a_tile[TILE_DIM];
__shared__ T b_tile[TILE_DIM];
__shared__ T result_tile[TILE_DIM];
for (int i = 0; i < TILE_DIM; i++) {
result_tile[i] = 0;
}
int tx = threadIdx.x;
for (int t = 0; t < (length - 1) / TILE_DIM + 1; t++) {
int index = t * TILE_DIM + tx;
if (index < length) {
a_tile[tx] = A[index];
b_tile[tx] = B[index];
} else {
a_tile[tx] = 0;
b_tile[tx] = 0;
}
__syncthreads();
result_tile[tx] += a_tile[tx] * b_tile[tx];
__syncthreads();
}
T resultValue = 0;
if (tx == 0) {
for (int i = 0; i < TILE_DIM; i++) {
resultValue += result_tile[i];
}
result[0] = resultValue;
}
}
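// Outer product: resultMatrix[i * lengthB + j] = vectorA[i] * vectorB[j].
// Each TILE_DIM x TILE_DIM block stages one tile of vectorA and vectorB in
// shared memory (loaded by the ty == 0 row) before writing its output tile.
// Assumes TILE_DIM x TILE_DIM thread blocks.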
template<typename T>
__device__ void columnDotRow(const T* vectorA, const T* vectorB, T* resultMatrix,
const int lengthA, const int lengthB) {
__shared__ T vectorA_tile[TILE_DIM];
__shared__ T vectorB_tile[TILE_DIM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
if (ty == 0) {
if (row + tx < lengthA) {
vectorA_tile[tx] = vectorA[row + tx];
}
if (col < lengthB) {
vectorB_tile[tx] = vectorB[col];
}
}
__syncthreads();
if (row < lengthA && col < lengthB) {
resultMatrix[row * lengthB + col] = vectorA_tile[ty] * vectorB_tile[tx];
}
}
|
5effdb66bdf600ebec3cd23c9d0ce5fbec20d424.cu
|
#define TILE_DIM 32
template<typename T>
__device__ void vectorDotVector(const T* A, const T* B, T* result, const int length) {
__shared__ T a_tile[TILE_DIM];
__shared__ T b_tile[TILE_DIM];
__shared__ T result_tile[TILE_DIM];
for (int i = 0; i < TILE_DIM; i++) {
result_tile[i] = 0;
}
int tx = threadIdx.x;
for (int t = 0; t < (length - 1) / TILE_DIM + 1; t++) {
int index = t * TILE_DIM + tx;
if (index < length) {
a_tile[tx] = A[index];
b_tile[tx] = B[index];
} else {
a_tile[tx] = 0;
b_tile[tx] = 0;
}
__syncthreads();
result_tile[tx] += a_tile[tx] * b_tile[tx];
__syncthreads();
}
T resultValue = 0;
if (tx == 0) {
for (int i = 0; i < TILE_DIM; i++) {
resultValue += result_tile[i];
}
result[0] = resultValue;
}
}
template<typename T>
__device__ void columnDotRow(const T* vectorA, const T* vectorB, T* resultMatrix,
const int lengthA, const int lengthB) {
__shared__ T vectorA_tile[TILE_DIM];
__shared__ T vectorB_tile[TILE_DIM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
if (ty == 0) {
if (row + tx < lengthA) {
vectorA_tile[tx] = vectorA[row + tx];
}
if (col < lengthB) {
vectorB_tile[tx] = vectorB[col];
}
}
__syncthreads();
if (row < lengthA && col < lengthB) {
resultMatrix[row * lengthB + col] = vectorA_tile[ty] * vectorB_tile[tx];
}
}
|
784e265235d67f99c7a4278963555c7fa5f081f1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "luaT.h"
#include "THH.h"
#include "hip/hip_runtime.h"
#include "aux.cuh"
#include <thrust/transform.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
// non-overlapping pooling (stride == kernel size)
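// Forward pass: one block per (plane, batch) pair (blockIdx.x = plane,
// blockIdx.y = batch). Threads sweep the output plane on a blockDim-strided
// grid; each output element is the sum of its kW x kH input window weighted
// by the per-plane kW x kH weight patch.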
__global__ void output_kernel(float *input, float* output, float* weight, int input_h, int input_w,
int output_h, int output_w, int kW, int kH){
float* ptr_input_plane = input + (blockIdx.x + gridDim.x * blockIdx.y) * input_w * input_h;
float* ptr_output_plane = output + (blockIdx.x + gridDim.x * blockIdx.y) * output_w * output_h;
float* weight_plane = weight + blockIdx.x * kW * kH;
int xout = threadIdx.x;
int yout = threadIdx.y;
const int xout_step = blockDim.x;
const int yout_step = blockDim.y;
int xin_start = threadIdx.x * kW;
int yin_start = threadIdx.y * kH;
const int xin_step = blockDim.x * kW;
const int yin_step = blockDim.y * kH;
int xin_end = (input_w/kW) * kW; //TODO could this be right?
int yin_end = (input_h/kH) * kH;
for (int yin = yin_start; yin < yin_end ; yin += yin_step){
for (int xin = xin_start; xin < xin_end; xin += xin_step){
float* ptr_input = ptr_input_plane + xin + yin * input_w;
float* ptr_output = ptr_output_plane + xout + yout * output_w;
if (xout < output_w && yout < output_h){
for (int ky = 0; ky < kH && yin + ky < input_h; ky++){
for (int kx = 0; kx < kW && xin + kx < input_w; kx++){
float* weight_plane_elem = weight_plane + kx + ky * kW;
float* ptr_input_elem = ptr_input + kx + ky * input_w;
*ptr_output += (*ptr_input_elem) * (*weight_plane_elem);
}
}
} // end if
xout += xout_step;
} // end for xout
yout += yout_step;
} // end for yout
}
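// Backward pass w.r.t. the input: every input element inside a pooling window
// receives that window's output gradient scaled by its corresponding weight.
// Windows do not overlap, so a plain store (no accumulation) is sufficient.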
__global__ void grad_input_kernel(float* input, float* grad_output, float* grad_input, float* weight, int input_h,
int input_w, int output_h, int output_w, int kW, int kH){
float* ptr_grad_output_plane = grad_output + (blockIdx.x + gridDim.x * blockIdx.y) * output_w * output_h;
float* ptr_grad_input_plane = grad_input + (blockIdx.x + gridDim.x * blockIdx.y) * input_w * input_h;
float* weight_plane = weight + blockIdx.x * kW * kH;
int xout = threadIdx.x;
int yout = threadIdx.y;
const int xout_step = blockDim.x;
const int yout_step = blockDim.y;
int xin_start = threadIdx.x * kW;
int yin_start = threadIdx.y * kH;
const int xin_step = blockDim.x * kW;
const int yin_step = blockDim.y * kH;
int xin_end = (input_w/kW) * kW; //TODO could this be right?
int yin_end = (input_h/kH) * kH;
for (int yin = yin_start; yin < yin_end; yin += yin_step){
for (int xin = xin_start; xin < xin_end; xin += xin_step){
float* ptr_grad_input = ptr_grad_input_plane + xin + yin * input_w;
float* ptr_grad_output_elem = ptr_grad_output_plane + xout + yout * output_w;
if (xout < output_w && yout < output_h){
for (int ky = 0; ky < kH && yin + ky < input_h; ky++){
for (int kx = 0; kx < kW && xin + kx < input_w; kx++){
float* ptr_grad_input_elem = ptr_grad_input + kx + ky * input_w;
float* weight_plane_elem = weight_plane + kx + ky * kW;
*ptr_grad_input_elem = (*weight_plane_elem) * (*ptr_grad_output_elem);
} // end for kx
} // end for ky
}
xout += xout_step;
} // end for xin
yout += yout_step;
} // end for yin
}
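// Backward pass w.r.t. the weights: accumulates
// grad_weight[plane][ky][kx] += scale * input * grad_output over all windows.
// atomicAdd is required because every window of a plane (and every batch
// element, via blockIdx.y) updates the same per-plane weight patch.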
__global__ void accGrad_kernel(float* input, float* grad_output, float* grad_weight, float* weight, int input_h,
int input_w, int output_h, int output_w, int kW, int kH, float scale){
float* ptr_input_plane = input + (blockIdx.x + gridDim.x * blockIdx.y) * input_w * input_h;
float* ptr_grad_output_plane = grad_output + (blockIdx.x + gridDim.x * blockIdx.y) * output_w * output_h;
float* grad_weight_plane = grad_weight + blockIdx.x * kW * kH;
int xout = threadIdx.x;
int yout = threadIdx.y;
const int xout_step = blockDim.x;
const int yout_step = blockDim.y;
int xin_start = threadIdx.x * kW;
int yin_start = threadIdx.y * kH;
const int xin_step = blockDim.x * kW;
const int yin_step = blockDim.y * kH;
int xin_end = (input_w/kW) * kW; //TODO could this be right?
int yin_end = (input_h/kH) * kH;
for (int yin = yin_start; yin < yin_end; yin += yin_step){
for (int xin = xin_start; xin < xin_end; xin += xin_step){
float* ptr_grad_output_plane_elem = ptr_grad_output_plane + xout + yout * output_w;
float* ptr_input_plane_elem = ptr_input_plane + xin + yin * input_w;
if (xout < output_w && yout < output_h){
for (int ky = 0; ky < kH && yin + ky < input_h; ky++){
for (int kx = 0; kx < kW && xin + kx < input_w; kx++){
float* grad_weight_plane_elem = grad_weight_plane + kx + ky * kW;
float* ptr_input_plane_elem_elem = ptr_input_plane_elem + kx + ky * input_w;
float tmp = scale * (*ptr_input_plane_elem_elem) * (*ptr_grad_output_plane_elem);
atomicAdd(grad_weight_plane_elem, tmp);
}
}
}
xout += xout_step;
} // end for xin
yout += yout_step;
} // end for yin
}
static int cunn_SpatialMlpPooling_updateOutput(lua_State *L){
THCState* state = getCutorchState(L);
THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaTensor* weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
float* output_data;
float* input_data;
float* weight_data;
long nInputCols = input -> size[3];
long nInputRows = input -> size[2];
long nInputPlane = input -> size[1];
long nBatch = input -> size[0];
long nOutputCols = nInputCols / kW;
long nOutputRows = nInputRows / kH;
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
weight_data = THCudaTensor_data(state, weight);
THCudaTensor_resize4d(state, output, nBatch, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_zero(state, output);
output_data = THCudaTensor_data(state, output);
dim3 blocks(nInputPlane, nBatch);
dim3 threads(32,8);
hipLaunchKernelGGL(( output_kernel) , dim3(blocks), dim3(threads), 0, 0, input_data, output_data, weight_data, nInputRows, nInputCols, nOutputRows, nOutputCols, kW, kH);
THCudaTensor_free(state, input);
hipError_t err = hipGetLastError();
if (err != hipSuccess){
printf("error in SpatialMaxPoolingPos.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
static int cunn_SpatialMlpPooling_updateGradInput(lua_State *L){
THCState* state = getCutorchState(L);
THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor* weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor* gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
float* input_data;
float* weight_data;
float* gradOutput_data;
float* gradInput_data;
long nInputCols = input -> size[3];
long nInputRows = input -> size[2];
long nInputPlane = input -> size[1];
long nBatch = input -> size[0];
long nOutputCols = nInputCols / kW;
long nOutputRows = nInputRows / kH;
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state ,gradOutput);
input_data = THCudaTensor_data(state, input);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradInput_data = THCudaTensor_data(state, gradInput);
weight_data = THCudaTensor_data(state, weight);
dim3 blocks(nInputPlane, nBatch);
dim3 threads(32,8);
hipLaunchKernelGGL(( grad_input_kernel) , dim3(blocks), dim3(threads), 0, 0, input_data, gradOutput_data, gradInput_data, weight_data, nInputRows, nInputCols, nOutputRows, nOutputCols, kW, kH);
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
hipError_t err = hipGetLastError();
if (err != hipSuccess){
printf("error in SpatialMaxPoolingPos.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
static int cunn_SpatialMlpPooling_accGradParameters(lua_State *L){
THCState* state = getCutorchState(L);
THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor* weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor* gradWeight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradWeight", "torch.CudaTensor");
float scale = luaL_checknumber(L, 4);
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
float* input_data;
float* weight_data;
float* gradOutput_data;
float* gradWeight_data;
long nInputCols = input -> size[3];
long nInputRows = input -> size[2];
long nInputPlane = input -> size[1];
long nBatch = input -> size[0];
long nOutputCols = nInputCols / kW;
long nOutputRows = nInputRows / kH;
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
THCudaTensor_resizeAs(state, gradWeight, weight);
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
input_data = THCudaTensor_data(state, input);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradWeight_data = THCudaTensor_data(state, gradWeight);
weight_data = THCudaTensor_data(state, weight);
dim3 blocks(nInputPlane, nBatch);
dim3 threads(32,8);
hipLaunchKernelGGL(( accGrad_kernel) , dim3(blocks), dim3(threads), 0, 0, input_data, gradOutput_data, gradWeight_data, weight_data, nInputRows, nInputCols, nOutputRows, nOutputCols, kW, kH, scale);
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
hipError_t err = hipGetLastError();
if (err != hipSuccess){
printf("error in SpatialMaxPoolingPos.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
static const struct luaL_Reg cunn_SpatialMlpPooling__ [] = {
{"SpatialMlpPooling_updateOutput", cunn_SpatialMlpPooling_updateOutput},
{"SpatialMlpPooling_updateGradInput", cunn_SpatialMlpPooling_updateGradInput},
{"SpatialMlpPooling_accGradParameters", cunn_SpatialMlpPooling_accGradParameters},
{NULL, NULL}
};
void cunn_SpatialMlpPooling_init(lua_State* L){
luaL_openlib(L, "jz", cunn_SpatialMlpPooling__, 0) ;
}
|
784e265235d67f99c7a4278963555c7fa5f081f1.cu
|
#include "luaT.h"
#include "THC.h"
#include "cuda.h"
#include "aux.cuh"
#include <thrust/transform.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
// non-overlapping pooling (stride == kernel size)
__global__ void output_kernel(float *input, float* output, float* weight, int input_h, int input_w,
int output_h, int output_w, int kW, int kH){
float* ptr_input_plane = input + (blockIdx.x + gridDim.x * blockIdx.y) * input_w * input_h;
float* ptr_output_plane = output + (blockIdx.x + gridDim.x * blockIdx.y) * output_w * output_h;
float* weight_plane = weight + blockIdx.x * kW * kH;
int xout = threadIdx.x;
int yout = threadIdx.y;
const int xout_step = blockDim.x;
const int yout_step = blockDim.y;
int xin_start = threadIdx.x * kW;
int yin_start = threadIdx.y * kH;
const int xin_step = blockDim.x * kW;
const int yin_step = blockDim.y * kH;
int xin_end = (input_w/kW) * kW; //TODO could this be right?
int yin_end = (input_h/kH) * kH;
for (int yin = yin_start; yin < yin_end ; yin += yin_step){
for (int xin = xin_start; xin < xin_end; xin += xin_step){
float* ptr_input = ptr_input_plane + xin + yin * input_w;
float* ptr_output = ptr_output_plane + xout + yout * output_w;
if (xout < output_w && yout < output_h){
for (int ky = 0; ky < kH && yin + ky < input_h; ky++){
for (int kx = 0; kx < kW && xin + kx < input_w; kx++){
float* weight_plane_elem = weight_plane + kx + ky * kW;
float* ptr_input_elem = ptr_input + kx + ky * input_w;
*ptr_output += (*ptr_input_elem) * (*weight_plane_elem);
}
}
} // end if
xout += xout_step;
} // end for xout
yout += yout_step;
} // end for yout
}
__global__ void grad_input_kernel(float* input, float* grad_output, float* grad_input, float* weight, int input_h,
int input_w, int output_h, int output_w, int kW, int kH){
float* ptr_grad_output_plane = grad_output + (blockIdx.x + gridDim.x * blockIdx.y) * output_w * output_h;
float* ptr_grad_input_plane = grad_input + (blockIdx.x + gridDim.x * blockIdx.y) * input_w * input_h;
float* weight_plane = weight + blockIdx.x * kW * kH;
int xout = threadIdx.x;
int yout = threadIdx.y;
const int xout_step = blockDim.x;
const int yout_step = blockDim.y;
int xin_start = threadIdx.x * kW;
int yin_start = threadIdx.y * kH;
const int xin_step = blockDim.x * kW;
const int yin_step = blockDim.y * kH;
int xin_end = (input_w/kW) * kW; //TODO could this be right?
int yin_end = (input_h/kH) * kH;
for (int yin = yin_start; yin < yin_end; yin += yin_step){
for (int xin = xin_start; xin < xin_end; xin += xin_step){
float* ptr_grad_input = ptr_grad_input_plane + xin + yin * input_w;
float* ptr_grad_output_elem = ptr_grad_output_plane + xout + yout * output_w;
if (xout < output_w && yout < output_h){
for (int ky = 0; ky < kH && yin + ky < input_h; ky++){
for (int kx = 0; kx < kW && xin + kx < input_w; kx++){
float* ptr_grad_input_elem = ptr_grad_input + kx + ky * input_w;
float* weight_plane_elem = weight_plane + kx + ky * kW;
*ptr_grad_input_elem = (*weight_plane_elem) * (*ptr_grad_output_elem);
} // end for kx
} // end for ky
}
xout += xout_step;
} // end for xin
yout += yout_step;
} // end for yin
}
__global__ void accGrad_kernel(float* input, float* grad_output, float* grad_weight, float* weight, int input_h,
int input_w, int output_h, int output_w, int kW, int kH, float scale){
float* ptr_input_plane = input + (blockIdx.x + gridDim.x * blockIdx.y) * input_w * input_h;
float* ptr_grad_output_plane = grad_output + (blockIdx.x + gridDim.x * blockIdx.y) * output_w * output_h;
float* grad_weight_plane = grad_weight + blockIdx.x * kW * kH;
int xout = threadIdx.x;
int yout = threadIdx.y;
const int xout_step = blockDim.x;
const int yout_step = blockDim.y;
int xin_start = threadIdx.x * kW;
int yin_start = threadIdx.y * kH;
const int xin_step = blockDim.x * kW;
const int yin_step = blockDim.y * kH;
int xin_end = (input_w/kW) * kW; //TODO could this be right?
int yin_end = (input_h/kH) * kH;
for (int yin = yin_start; yin < yin_end; yin += yin_step){
for (int xin = xin_start; xin < xin_end; xin += xin_step){
float* ptr_grad_output_plane_elem = ptr_grad_output_plane + xout + yout * output_w;
float* ptr_input_plane_elem = ptr_input_plane + xin + yin * input_w;
if (xout < output_w && yout < output_h){
for (int ky = 0; ky < kH && yin + ky < input_h; ky++){
for (int kx = 0; kx < kW && xin + kx < input_w; kx++){
float* grad_weight_plane_elem = grad_weight_plane + kx + ky * kW;
float* ptr_input_plane_elem_elem = ptr_input_plane_elem + kx + ky * input_w;
float tmp = scale * (*ptr_input_plane_elem_elem) * (*ptr_grad_output_plane_elem);
atomicAdd(grad_weight_plane_elem, tmp);
}
}
}
xout += xout_step;
} // end for xin
yout += yout_step;
} // end for yin
}
static int cunn_SpatialMlpPooling_updateOutput(lua_State *L){
THCState* state = getCutorchState(L);
THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaTensor* weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
float* output_data;
float* input_data;
float* weight_data;
long nInputCols = input -> size[3];
long nInputRows = input -> size[2];
long nInputPlane = input -> size[1];
long nBatch = input -> size[0];
long nOutputCols = nInputCols / kW;
long nOutputRows = nInputRows / kH;
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCudaTensor_newContiguous(state, input);
input_data = THCudaTensor_data(state, input);
weight_data = THCudaTensor_data(state, weight);
THCudaTensor_resize4d(state, output, nBatch, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_zero(state, output);
output_data = THCudaTensor_data(state, output);
dim3 blocks(nInputPlane, nBatch);
dim3 threads(32,8);
output_kernel <<<blocks, threads>>> (input_data, output_data, weight_data, nInputRows, nInputCols, nOutputRows, nOutputCols, kW, kH);
THCudaTensor_free(state, input);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess){
printf("error in SpatialMaxPoolingPos.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
static int cunn_SpatialMlpPooling_updateGradInput(lua_State *L){
THCState* state = getCutorchState(L);
THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor* weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor* gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
float* input_data;
float* weight_data;
float* gradOutput_data;
float* gradInput_data;
long nInputCols = input -> size[3];
long nInputRows = input -> size[2];
long nInputPlane = input -> size[1];
long nBatch = input -> size[0];
long nOutputCols = nInputCols / kW;
long nOutputRows = nInputRows / kH;
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state ,gradOutput);
input_data = THCudaTensor_data(state, input);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradInput_data = THCudaTensor_data(state, gradInput);
weight_data = THCudaTensor_data(state, weight);
dim3 blocks(nInputPlane, nBatch);
dim3 threads(32,8);
grad_input_kernel <<<blocks, threads>>> (input_data, gradOutput_data, gradInput_data, weight_data, nInputRows, nInputCols, nOutputRows, nOutputCols, kW, kH);
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess){
printf("error in SpatialMaxPoolingPos.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
static int cunn_SpatialMlpPooling_accGradParameters(lua_State *L){
THCState* state = getCutorchState(L);
THCudaTensor* input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor* weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor* gradWeight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradWeight", "torch.CudaTensor");
float scale = luaL_checknumber(L, 4);
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
float* input_data;
float* weight_data;
float* gradOutput_data;
float* gradWeight_data;
long nInputCols = input -> size[3];
long nInputRows = input -> size[2];
long nInputPlane = input -> size[1];
long nBatch = input -> size[0];
long nOutputCols = nInputCols / kW;
long nOutputRows = nInputRows / kH;
luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
THCudaTensor_resizeAs(state, gradWeight, weight);
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
input_data = THCudaTensor_data(state, input);
gradOutput_data = THCudaTensor_data(state, gradOutput);
gradWeight_data = THCudaTensor_data(state, gradWeight);
weight_data = THCudaTensor_data(state, weight);
dim3 blocks(nInputPlane, nBatch);
dim3 threads(32,8);
accGrad_kernel <<<blocks, threads>>> (input_data, gradOutput_data, gradWeight_data, weight_data, nInputRows, nInputCols, nOutputRows, nOutputCols, kW, kH, scale);
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess){
printf("error in SpatialMaxPoolingPos.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
static const struct luaL_Reg cunn_SpatialMlpPooling__ [] = {
{"SpatialMlpPooling_updateOutput", cunn_SpatialMlpPooling_updateOutput},
{"SpatialMlpPooling_updateGradInput", cunn_SpatialMlpPooling_updateGradInput},
{"SpatialMlpPooling_accGradParameters", cunn_SpatialMlpPooling_accGradParameters},
{NULL, NULL}
};
void cunn_SpatialMlpPooling_init(lua_State* L){
luaL_openlib(L, "jz", cunn_SpatialMlpPooling__, 0) ;
}
|
2782fc900ac4bcd106b3413758bad6e2f819f9c4.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2022 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "driver.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
private:
GPUHistEvaluator<GradientSumT> evaluator_;
Context const* ctx_;
public:
EllpackPageImpl const* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
dh::caching_device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
/*! \brief Sum gradient for each node. */
std::vector<GradientPairPrecise> node_sum_gradients;
TrainParam param;
HistRounding<GradientSumT> histogram_rounding;
dh::PinnedMemory pinned;
common::Monitor monitor;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
GPUHistMakerDevice(Context const* ctx, EllpackPageImpl const* _page,
common::Span<FeatureType const> _feature_types, bst_uint _n_rows,
TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features,
BatchParam _batch_param)
: evaluator_{_param, n_features, ctx->gpu_id},
ctx_(ctx),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
batch_param(std::move(_batch_param)) {
sampler.reset(new GradientBasedSampler(page, _n_rows, batch_param, param.subsample,
param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(ctx_->gpu_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(num_columns, info.feature_weights.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param,
ctx_->gpu_id);
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPairPrecise{});
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(hipMemcpyAsync(
d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair), hipMemcpyDeviceToDevice));
auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));
hist.Reset();
}
GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum, float weight) {
int nidx = RegTree::kRoot;
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs<GradientSumT> inputs{nidx,
root_sum,
gpu_param,
feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx)};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, weight);
return split;
}
void EvaluateLeftRightSplits(GPUExpandEntry candidate, int left_nidx, int right_nidx,
const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs<GradientSumT> left{left_nidx,
candidate.split.left_sum,
gpu_param,
left_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx)};
EvaluateSplitInputs<GradientSumT> right{right_nidx,
candidate.split.right_sum,
gpu_param,
right_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx)};
dh::TemporaryArray<GPUExpandEntry> entries(2);
this->evaluator_.EvaluateSplits(candidate, left, right, dh::ToSpan(entries));
dh::safe_cuda(hipMemcpyAsync(pinned_candidates_out.data(), entries.data().get(),
sizeof(GPUExpandEntry) * entries.size(), hipMemcpyDeviceToHost));
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(ctx_->gpu_id),
feature_groups->DeviceAccessor(ctx_->gpu_id), gpair,
d_ridx, d_node_hist, histogram_rounding);
}
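// The "subtraction trick": a child's histogram equals the parent's histogram
// minus the sibling's, so only one child per split needs a full histogram
// build; the other is derived here by element-wise subtraction.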
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) && hist.HistogramExists(nidx_parent);
}
void UpdatePosition(const GPUExpandEntry &e, RegTree* p_tree) {
RegTree::Node split_node = (*p_tree)[e.nid];
auto split_type = p_tree->NodeSplitType(e.nid);
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto node_cats = e.split.split_cats.Bits();
row_partitioner->UpdatePosition(
e.nid, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
bst_node_t new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
bool go_left = true;
if (split_type == FeatureType::kCategorical) {
go_left = common::Decision<false>(node_cats, cut_value, split_node.DefaultLeft());
} else {
go_left = cut_value <= split_node.SplitCond();
}
if (go_left) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, p_fmat->Info().num_row_));
}
if (task.UpdateTreeLeaf() && !p_fmat->SingleColBlock() && param.subsample != 1.0) {
// see comment in the `FinalisePositionInPage`.
LOG(FATAL) << "Current objective function can not be used with subsampled external memory.";
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments), task,
p_out_position);
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments), task,
p_out_position);
}
}
}
void FinalisePositionInPage(EllpackPageImpl const *page,
const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types,
common::Span<uint32_t const> categories,
common::Span<RegTree::Segment> categories_segments,
ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto d_gpair = this->gpair;
row_partitioner->FinalisePosition(
ctx_, task, p_out_position,
[=] __device__(size_t row_id, int position) {
          // What happens if the user prunes the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats =
categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision<false>(node_cats, element, node.DefaultLeft());
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
},
[d_gpair] __device__(size_t ridx) {
// FIXME(jiamingy): Doesn't work when sampling is used with external memory as
// the sampler compacts the gradient vector.
return d_gpair[ridx].GetHess() - .0f == 0.f;
});
}
void UpdatePredictionCache(linalg::VectorView<float> out_preds_d, RegTree const* p_tree) {
CHECK(p_tree);
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
auto d_ridx = row_partitioner->GetRows();
GPUTrainingParam param_d(param);
dh::TemporaryArray<GradientPairPrecise> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(hipMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPairPrecise) * node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto tree_evaluator = evaluator_.GetEvaluator();
auto const& h_nodes = p_tree->GetNodes();
dh::caching_device_vector<RegTree::Node> nodes(h_nodes.size());
dh::safe_cuda(hipMemcpyAsync(nodes.data().get(), h_nodes.data(),
h_nodes.size() * sizeof(RegTree::Node), hipMemcpyHostToDevice));
auto d_nodes = dh::ToSpan(nodes);
dh::LaunchN(d_ridx.size(), [=] XGBOOST_DEVICE(size_t idx) mutable {
bst_node_t nidx = d_position[idx];
auto weight = d_nodes[nidx].LeafValue();
out_preds_d(d_ridx[idx]) += weight;
});
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
monitor.Stop("AllReduce");
}
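  // Illustrative note (not part of the upstream source): the element count passed to
  // AllReduceSum above, worked through with hypothetical numbers. Each histogram bin
  // stores one GradientSumT, i.e. two scalars (grad and hess), so for a cut matrix with
  // 256 bins and GradientSumT = GradientPairPrecise the reducer sums
  // 256 * (16 / 8) = 512 doubles element-wise across workers.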
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const GPUExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
    // Use the sum of Hessians as a heuristic to select the node with the fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
// Sanity check - have we created a leaf with no training instances?
if (!rabit::IsDistributed() && row_partitioner) {
CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
<< "No training instances in this leaf!";
}
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto is_cat = candidate.split.is_cat;
if (is_cat) {
CHECK_LT(candidate.split.fvalue, std::numeric_limits<bst_cat_t>::max())
<< "Categorical feature value too large.";
std::vector<uint32_t> split_cats;
CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
auto max_cat = candidate.split.MaxCat();
split_cats.resize(common::CatBitField::ComputeStorageSize(max_cat + 1), 0);
CHECK_LE(split_cats.size(), h_cats.size());
std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
} else {
tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
}
evaluator_.ApplyTreeSplit(candidate, p_tree);
node_sum_gradients[tree[candidate.nid].LeftChild()] = candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] = candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
GPUExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
auto gpair_it = dh::MakeTransformIterator<GradientPairPrecise>(
dh::tbegin(gpair), [] __device__(auto const& gpair) { return GradientPairPrecise{gpair}; });
GradientPairPrecise root_sum =
dh::Reduce(thrust::hip::par(alloc), gpair_it, gpair_it + gpair.size(),
GradientPairPrecise{}, thrust::plus<GradientPairPrecise>{});
rabit::Allreduce<rabit::op::Sum, double>(reinterpret_cast<double*>(&root_sum), 2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto root_entry = this->EvaluateRootSplit(root_sum, weight);
return root_entry;
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, ObjInfo task,
RegTree* p_tree, dh::AllReducer* reducer,
HostDeviceVector<bst_node_t>* p_out_position) {
auto& tree = *p_tree;
Driver<GPUExpandEntry> driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy));
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
auto num_leaves = 1;
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
auto new_candidates =
pinned.GetSpan<GPUExpandEntry>(expand_set.size() * 2, GPUExpandEntry());
for (auto i = 0ull; i < expand_set.size(); i++) {
auto candidate = expand_set.at(i);
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
        // Only create child entries if needed
if (GPUExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
// Update position is only run when child is valid, instead of right after apply
          // split (as in the approx tree method). Hence we have the finalise position call
// in GPU Hist.
this->UpdatePosition(candidate, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateLeftRightSplits(candidate, left_child_nidx, right_child_nidx, *p_tree,
new_candidates.subspan(i * 2, 2));
monitor.Stop("EvaluateSplits");
} else {
// Set default
new_candidates[i * 2] = GPUExpandEntry();
new_candidates[i * 2 + 1] = GPUExpandEntry();
}
}
dh::DefaultStream().Sync();
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat, task, p_out_position);
monitor.Stop("FinalisePosition");
}
};
class GPUHistMaker : public TreeUpdater {
using GradientSumT = GradientPairPrecise;
public:
explicit GPUHistMaker(GenericParameter const* ctx, ObjInfo task)
: TreeUpdater(ctx), task_{task} {};
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
param_.UpdateAllowUnknown(args);
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
initialised_ = false;
monitor_.Init("updater_gpu_hist");
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
initialised_ = false;
FromJson(config.at("train_param"), ¶m_);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
out["train_param"] = ToJson(param_);
}
~GPUHistMaker() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree*>& trees) override {
monitor_.Start("Update");
    // rescale the learning rate according to the number of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
// build tree
try {
size_t t_idx{0};
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree, &out_position[t_idx]);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
++t_idx;
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({ctx_->gpu_id}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
ctx_->gpu_id,
param_.max_bin,
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
info_->feature_types.SetDevice(ctx_->gpu_id);
maker.reset(new GPUHistMakerDevice<GradientSumT>(
ctx_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, param_,
column_sampling_seed, info_->num_col_, batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat, RegTree const* p_tree) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
p_last_tree_ = p_tree;
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree{}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
HostDeviceVector<bst_node_t>* p_out_position) {
monitor_.Start("InitData");
this->InitData(p_fmat, p_tree);
monitor_.Stop("InitData");
gpair->SetDevice(ctx_->gpu_id);
maker->UpdateTree(gpair, p_fmat, task_, p_tree, &reducer_, p_out_position);
}
bool UpdatePredictionCache(const DMatrix* data,
linalg::VectorView<bst_float> p_out_preds) override {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
char const* Name() const override { return "grow_gpu_hist"; }
private:
bool initialised_{false};
GPUHistMakerTrainParam hist_maker_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_{nullptr};
RegTree const* p_last_tree_{nullptr};
ObjInfo task_;
common::Monitor monitor_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([](GenericParameter const* tparam, ObjInfo task) {
return new GPUHistMaker(tparam, task);
});
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
2782fc900ac4bcd106b3413758bad6e2f819f9c4.cu
|
/*!
* Copyright 2017-2022 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "driver.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
        // the slot assigned above is zeroed below ("Zero recycled memory")
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
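// Illustrative usage sketch (not part of the upstream source) for the storage above; the
// numbers are hypothetical. The backing buffer doubles until kStopGrowingSize and after
// that the oldest node slot is recycled for new nodes:
//
//   DeviceHistogram<GradientPairPrecise> hist;
//   hist.Init(/*device_id=*/0, /*n_bins=*/256);   // 256 * 2 = 512 doubles per node
//   hist.AllocateHistogram(/*nidx=*/0);           // first call grows data_
//   auto root = hist.GetNodeHistogram(0);         // span of 256 GradientPairPrecise
//   hist.AllocateHistogram(1);                    // appends while below the cap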
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
private:
GPUHistEvaluator<GradientSumT> evaluator_;
Context const* ctx_;
public:
EllpackPageImpl const* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
dh::caching_device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
/*! \brief Sum gradient for each node. */
std::vector<GradientPairPrecise> node_sum_gradients;
TrainParam param;
HistRounding<GradientSumT> histogram_rounding;
dh::PinnedMemory pinned;
common::Monitor monitor;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
GPUHistMakerDevice(Context const* ctx, EllpackPageImpl const* _page,
common::Span<FeatureType const> _feature_types, bst_uint _n_rows,
TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features,
BatchParam _batch_param)
: evaluator_{_param, n_features, ctx->gpu_id},
ctx_(ctx),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
batch_param(std::move(_batch_param)) {
sampler.reset(new GradientBasedSampler(page, _n_rows, batch_param, param.subsample,
param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(ctx_->gpu_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(num_columns, info.feature_weights.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param,
ctx_->gpu_id);
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPairPrecise{});
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(cudaMemcpyAsync(
d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair), cudaMemcpyDeviceToDevice));
auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));
hist.Reset();
}
GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum, float weight) {
int nidx = RegTree::kRoot;
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs<GradientSumT> inputs{nidx,
root_sum,
gpu_param,
feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx)};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, weight);
return split;
}
void EvaluateLeftRightSplits(GPUExpandEntry candidate, int left_nidx, int right_nidx,
const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs<GradientSumT> left{left_nidx,
candidate.split.left_sum,
gpu_param,
left_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx)};
EvaluateSplitInputs<GradientSumT> right{right_nidx,
candidate.split.right_sum,
gpu_param,
right_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx)};
dh::TemporaryArray<GPUExpandEntry> entries(2);
this->evaluator_.EvaluateSplits(candidate, left, right, dh::ToSpan(entries));
dh::safe_cuda(cudaMemcpyAsync(pinned_candidates_out.data(), entries.data().get(),
sizeof(GPUExpandEntry) * entries.size(), cudaMemcpyDeviceToHost));
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(ctx_->gpu_id),
feature_groups->DeviceAccessor(ctx_->gpu_id), gpair,
d_ridx, d_node_hist, histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) && hist.HistogramExists(nidx_parent);
}
void UpdatePosition(const GPUExpandEntry &e, RegTree* p_tree) {
RegTree::Node split_node = (*p_tree)[e.nid];
auto split_type = p_tree->NodeSplitType(e.nid);
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto node_cats = e.split.split_cats.Bits();
row_partitioner->UpdatePosition(
e.nid, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
bst_node_t new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
bool go_left = true;
if (split_type == FeatureType::kCategorical) {
go_left = common::Decision<false>(node_cats, cut_value, split_node.DefaultLeft());
} else {
go_left = cut_value <= split_node.SplitCond();
}
if (go_left) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, p_fmat->Info().num_row_));
}
if (task.UpdateTreeLeaf() && !p_fmat->SingleColBlock() && param.subsample != 1.0) {
// see comment in the `FinalisePositionInPage`.
LOG(FATAL) << "Current objective function can not be used with subsampled external memory.";
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments), task,
p_out_position);
} else {
for (auto const& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments), task,
p_out_position);
}
}
}
void FinalisePositionInPage(EllpackPageImpl const *page,
const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types,
common::Span<uint32_t const> categories,
common::Span<RegTree::Segment> categories_segments,
ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto d_gpair = this->gpair;
row_partitioner->FinalisePosition(
ctx_, task, p_out_position,
[=] __device__(size_t row_id, int position) {
          // What happens if the user prunes the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats =
categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision<false>(node_cats, element, node.DefaultLeft());
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
},
[d_gpair] __device__(size_t ridx) {
// FIXME(jiamingy): Doesn't work when sampling is used with external memory as
// the sampler compacts the gradient vector.
return d_gpair[ridx].GetHess() - .0f == 0.f;
});
}
void UpdatePredictionCache(linalg::VectorView<float> out_preds_d, RegTree const* p_tree) {
CHECK(p_tree);
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
auto d_ridx = row_partitioner->GetRows();
GPUTrainingParam param_d(param);
dh::TemporaryArray<GradientPairPrecise> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(cudaMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPairPrecise) * node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto tree_evaluator = evaluator_.GetEvaluator();
auto const& h_nodes = p_tree->GetNodes();
dh::caching_device_vector<RegTree::Node> nodes(h_nodes.size());
dh::safe_cuda(cudaMemcpyAsync(nodes.data().get(), h_nodes.data(),
h_nodes.size() * sizeof(RegTree::Node), cudaMemcpyHostToDevice));
auto d_nodes = dh::ToSpan(nodes);
dh::LaunchN(d_ridx.size(), [=] XGBOOST_DEVICE(size_t idx) mutable {
bst_node_t nidx = d_position[idx];
auto weight = d_nodes[nidx].LeafValue();
out_preds_d(d_ridx[idx]) += weight;
});
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const GPUExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
    // Use the sum of Hessians as a heuristic to select the node with the fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
// Sanity check - have we created a leaf with no training instances?
if (!rabit::IsDistributed() && row_partitioner) {
CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
<< "No training instances in this leaf!";
}
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto is_cat = candidate.split.is_cat;
if (is_cat) {
CHECK_LT(candidate.split.fvalue, std::numeric_limits<bst_cat_t>::max())
<< "Categorical feature value too large.";
std::vector<uint32_t> split_cats;
CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
auto max_cat = candidate.split.MaxCat();
split_cats.resize(common::CatBitField::ComputeStorageSize(max_cat + 1), 0);
CHECK_LE(split_cats.size(), h_cats.size());
std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
} else {
tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
}
evaluator_.ApplyTreeSplit(candidate, p_tree);
node_sum_gradients[tree[candidate.nid].LeftChild()] = candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] = candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
GPUExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
auto gpair_it = dh::MakeTransformIterator<GradientPairPrecise>(
dh::tbegin(gpair), [] __device__(auto const& gpair) { return GradientPairPrecise{gpair}; });
GradientPairPrecise root_sum =
dh::Reduce(thrust::cuda::par(alloc), gpair_it, gpair_it + gpair.size(),
GradientPairPrecise{}, thrust::plus<GradientPairPrecise>{});
rabit::Allreduce<rabit::op::Sum, double>(reinterpret_cast<double*>(&root_sum), 2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto root_entry = this->EvaluateRootSplit(root_sum, weight);
return root_entry;
}
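  // Illustrative note (not part of the upstream source): a worked example of the root
  // weight set above, under the usual second-order approximation with only L2
  // regularisation. For a hypothetical root_sum of G = -10, H = 20 and lambda = 1,
  // CalcWeight gives w = -G / (H + lambda) = 10 / 21, roughly 0.476, and the root leaf
  // value becomes learning_rate * w (for example 0.3 * 0.476, roughly 0.143).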
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, ObjInfo task,
RegTree* p_tree, dh::AllReducer* reducer,
HostDeviceVector<bst_node_t>* p_out_position) {
auto& tree = *p_tree;
Driver<GPUExpandEntry> driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy));
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
auto num_leaves = 1;
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
auto new_candidates =
pinned.GetSpan<GPUExpandEntry>(expand_set.size() * 2, GPUExpandEntry());
for (auto i = 0ull; i < expand_set.size(); i++) {
auto candidate = expand_set.at(i);
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
        // Only create child entries if needed
if (GPUExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
// Update position is only run when child is valid, instead of right after apply
          // split (as in the approx tree method). Hence we have the finalise position call
// in GPU Hist.
this->UpdatePosition(candidate, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateLeftRightSplits(candidate, left_child_nidx, right_child_nidx, *p_tree,
new_candidates.subspan(i * 2, 2));
monitor.Stop("EvaluateSplits");
} else {
// Set default
new_candidates[i * 2] = GPUExpandEntry();
new_candidates[i * 2 + 1] = GPUExpandEntry();
}
}
dh::DefaultStream().Sync();
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat, task, p_out_position);
monitor.Stop("FinalisePosition");
}
};
class GPUHistMaker : public TreeUpdater {
using GradientSumT = GradientPairPrecise;
public:
explicit GPUHistMaker(GenericParameter const* ctx, ObjInfo task)
: TreeUpdater(ctx), task_{task} {};
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
param_.UpdateAllowUnknown(args);
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
initialised_ = false;
monitor_.Init("updater_gpu_hist");
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
initialised_ = false;
FromJson(config.at("train_param"), ¶m_);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
out["train_param"] = ToJson(param_);
}
~GPUHistMaker() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree*>& trees) override {
monitor_.Start("Update");
    // rescale the learning rate according to the number of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
// build tree
try {
size_t t_idx{0};
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree, &out_position[t_idx]);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
++t_idx;
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({ctx_->gpu_id}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
ctx_->gpu_id,
param_.max_bin,
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
info_->feature_types.SetDevice(ctx_->gpu_id);
maker.reset(new GPUHistMakerDevice<GradientSumT>(
ctx_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, param_,
column_sampling_seed, info_->num_col_, batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat, RegTree const* p_tree) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
p_last_tree_ = p_tree;
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree{}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
HostDeviceVector<bst_node_t>* p_out_position) {
monitor_.Start("InitData");
this->InitData(p_fmat, p_tree);
monitor_.Stop("InitData");
gpair->SetDevice(ctx_->gpu_id);
maker->UpdateTree(gpair, p_fmat, task_, p_tree, &reducer_, p_out_position);
}
bool UpdatePredictionCache(const DMatrix* data,
linalg::VectorView<bst_float> p_out_preds) override {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
char const* Name() const override { return "grow_gpu_hist"; }
private:
bool initialised_{false};
GPUHistMakerTrainParam hist_maker_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_{nullptr};
RegTree const* p_last_tree_{nullptr};
ObjInfo task_;
common::Monitor monitor_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([](GenericParameter const* tparam, ObjInfo task) {
return new GPUHistMaker(tparam, task);
});
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
c8356aa3f87b8892c20953ffd09746b3198b0813.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 30.05.2019
//
#include <ops/declarable/helpers/one_hot.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// x - indices, z - output
template<typename X, typename Z>
__global__ static void onehotCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const uint axis, const uint depth, const Z on, const Z off) {
const auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
__shared__ int xRank, zRank;
__shared__ Nd4jLong zLen, totalThreads, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coord = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coord);
const auto zOffset = shape::getOffset(zShapeInfo, coord);
const auto depthCoord = coord[axis];
for (uint j = axis; j < zRank - 1; ++j)
coord[j] = coord[j + 1];
const auto xOffset = shape::getOffset(xShapeInfo, coord);
const Nd4jLong idx = x[xOffset];
z[zOffset] = depthCoord == idx ? on : off;
}
}
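///////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the upstream file): a host-side stand-in for what the
// kernel above computes, restricted to the common case of a rank-1 index array with the
// depth axis appended last. The function name is hypothetical and the sketch is for
// exposition only; it is not called anywhere in this file.
inline void onehotHostSketch(const Nd4jLong* x, int len, int depth, float on, float off, float* z) {
    // z is a row-major [len, depth] buffer: z[i][d] = on where d equals x[i], off elsewhere.
    // E.g. x = {1, 0, 2}, depth = 3, on = 1, off = 0 gives rows {0,1,0}, {1,0,0}, {0,0,1}.
    for (int i = 0; i < len; ++i)
        for (int d = 0; d < depth; ++d)
            z[i * depth + d] = (d == static_cast<int>(x[i])) ? on : off;
}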
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void onehotCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void *vx, const Nd4jLong *xShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const uint axis, const uint depth,
const double on, const double off) {
hipLaunchKernelGGL(( onehotCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, axis, depth, static_cast<Y>(on), static_cast<Y>(off));
}
///////////////////////////////////////////////////////////////////
void onehot(const sd::LaunchContext* context, const NDArray *indices, NDArray *output, const uint axis, const uint depth, const double on, const double off) {
const auto xType = indices->dataType();
const auto zType = output->dataType();
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (output->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(*output->shapeInfo())) * output->rankOf() + 128;
PointersManager manager(context, "onehot");
NDArray::prepareSpecialUse({output}, {indices});
BUILD_DOUBLE_SELECTOR(xType, zType, onehotCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), axis, depth, on, off), LIBND4J_TYPES, LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {indices});
manager.synchronize();
}
}
}
}
|
c8356aa3f87b8892c20953ffd09746b3198b0813.cu
|
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 30.05.2019
//
#include <ops/declarable/helpers/one_hot.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// x - indices, z - output
template<typename X, typename Z>
__global__ static void onehotCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const uint axis, const uint depth, const Z on, const Z off) {
const auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
__shared__ int xRank, zRank;
__shared__ Nd4jLong zLen, totalThreads, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
auto coord = sharedMem + threadIdx.x * zRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coord);
const auto zOffset = shape::getOffset(zShapeInfo, coord);
const auto depthCoord = coord[axis];
for (uint j = axis; j < zRank - 1; ++j)
coord[j] = coord[j + 1];
const auto xOffset = shape::getOffset(xShapeInfo, coord);
const Nd4jLong idx = x[xOffset];
z[zOffset] = depthCoord == idx ? on : off;
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void onehotCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void *vx, const Nd4jLong *xShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const uint axis, const uint depth,
const double on, const double off) {
onehotCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, axis, depth, static_cast<Y>(on), static_cast<Y>(off));
}
///////////////////////////////////////////////////////////////////
void onehot(const sd::LaunchContext* context, const NDArray *indices, NDArray *output, const uint axis, const uint depth, const double on, const double off) {
const auto xType = indices->dataType();
const auto zType = output->dataType();
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (output->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(decltype(*output->shapeInfo())) * output->rankOf() + 128;
PointersManager manager(context, "onehot");
NDArray::prepareSpecialUse({output}, {indices});
BUILD_DOUBLE_SELECTOR(xType, zType, onehotCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), axis, depth, on, off), LIBND4J_TYPES, LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {indices});
manager.synchronize();
}
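// Illustrative note (not part of the upstream file): the sharedMem value computed above
// reserves one coordinate scratch array of rankOf() Nd4jLong entries per thread (the
// kernel indexes it as sharedMem + threadIdx.x * zRank), plus 128 bytes of slack. For a
// hypothetical block of 256 threads and a rank-2 output that is 256 * 8 * 2 + 128 = 4224
// bytes of dynamic shared memory per block.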
}
}
}
|
9d1e2a9d2f79aa612c3ae34d80af2e5f94bda83f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THH.h"
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHHalf.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include "THHDeviceUtils.cuh"
#include "THHNumerics.cuh"
#include "THHAtomics.cuh"
#include "THHThrustAllocator.cuh"
#include "THHTensorSort.cuh"
#include "THHTensor.hpp"
#include "THHStorage.hpp"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
// We prefer this kernel to avoid reloading index points if the number
// of indices is small.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType innerSize,
int64_t dstCopyDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstCopyDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
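// Illustrative sketch (not part of the upstream file): a host-side stand-in for the
// small-vs-large kernel choice that the comments above describe. The function name and
// the cutoff value are hypothetical and for exposition only; this is not the dispatch
// logic actually used for these kernels.
inline bool thcSketchPreferSmallIndexKernel(int64_t numIndices) {
  // Few chosen indices: amortise each index load over a whole slice (small-index kernel).
  // Many chosen indices: spread parallelism over every (index, element) pair instead
  // (large-index kernel).
  const int64_t kHypotheticalCutoff = 16;
  return numIndices <= kHypotheticalCutoff;
}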
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstCopyDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstCopyDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
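// Illustrative note (not part of the upstream file): the linearIndex decomposition used
// above, worked through with hypothetical numbers. With IndexIsMajor == true and
// innerSize == 100, the grid-stride index linearIndex == 537 splits into
// srcIndex = 537 / 100 = 5 (which entry of `indices` to use) and
// elementInSlice = 537 % 100 = 37 (which element inside the copied slice); the
// non-major specialisation simply swaps the two roles.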
// We prefer this kernel to avoid reloading index points if the number
// of indices is small.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstAddDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex_ < dstFillDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex_ < dstFillDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(srcIndex < srcSelectDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
const TensorInfo<T, IndexType>& info,
int64_t index,
IndexType size)
{
IndexType linearIndex = static_cast<IndexType>(index);
assert(linearIndex < size && linearIndex >= -size);
if (linearIndex < 0) {
linearIndex += size;
}
return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info) - TH_INDEX_BASE;
}
struct WrapIndexOp {
WrapIndexOp(int64_t size) : size(size) {}
__device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
auto idx = *in;
assert(idx < size && idx >= -size);
*out = idx < 0 ? idx + size : idx;
}
int64_t size;
};
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* out, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
*out = info.data[offset];
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
info.data[offset] = *value;
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
: info(info), numel(numel), start(start), end(end) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
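    // indices are assumed sorted (duplicates adjacent): only the first element of
    // each run of equal indices accumulates the whole run, avoiding conflicting writes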
if (index == start || *index != *(index - 1)) {
int64_t linear_index = *index;
auto offset = indexToOffset<Dims>(info, linear_index, numel);
do {
info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
index++;
value++;
} while (index != end && *index == linear_index);
}
}
const TensorInfo<T, IndexType> info;
IndexType numel;
int64_t* start;
int64_t* end;
};
template<typename IndexType, typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
// These are only valid if index is contiguous
auto start = THCudaLongTensor_data(state, index);
auto end = start + THCudaLongTensor_numel(state, index);
auto aInfo = getTensorInfo<T, TensorType, IndexType>(state, a);
aInfo.collapseDims();
auto numel = THCTensor_nElement(state, a);
if (aInfo.isContiguous()) {
auto op = Op<T, IndexType, -2>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
} else {
auto op = Op<T, IndexType, -1>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
}
}
template<typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
if (THCTensor_canUse32BitIndexMath(state, a, INT_MAX)) {
dispatchTakePutImpl<int32_t, T, Op>(state, a, b, index);
} else {
dispatchTakePutImpl<int64_t, T, Op>(state, a, b, index);
}
}
#include "generic/THCTensorIndex.cu"
#include "THHGenerateAllTypes.h"
|
9d1e2a9d2f79aa612c3ae34d80af2e5f94bda83f.cu
|
#include "THC.h"
#include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCHalf.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include "THCDeviceUtils.cuh"
#include "THCNumerics.cuh"
#include "THCAtomics.cuh"
#include "THCThrustAllocator.cuh"
#include "THCTensorSort.cuh"
#include "THCTensor.hpp"
#include "THCStorage.hpp"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <algorithm> // for std::min
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexCopyLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexCopySmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType innerSize,
int64_t dstCopyDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstCopyDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexCopySmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexCopyLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstCopyDim,
int srcCopyDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstCopyDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstCopyDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstCopyDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcCopyDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexAddLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexAddSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType innerSize,
int64_t dstAddDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType srcIndex = 0; srcIndex < indices.sizes[0]; ++srcIndex) {
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstAddDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexAddSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexAddLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstAddDim,
int srcAddDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstAddDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType srcIndex, elementInSlice;
if (IndexIsMajor) {
srcIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
srcIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(srcIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex < dstAddDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstAddDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcAddDim];
atomicAdd(&dst.data[dstOffset], src.data[srcOffset]);
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexFillLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int IdxDim>
__global__ void indexFillSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex_ < dstFillDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexFillSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexFillLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<int64_t, IndexType> indices,
int dstFillDim,
IndexType totalSize,
IndexType innerSize,
int64_t dstFillDimSize,
T val) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType dstIndex_ =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(dstIndex_ < dstFillDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex_ * dst.strides[dstFillDim];
dst.data[dstOffset] = val;
}
}
// We prefer this kernel to avoid reloading index points if the number
// of indices is a small number.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is large, then the
// indexSelectLargeIndex kernel is a better choice to increase
// parallelism.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim>
__global__ void indexSelectSmallIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType innerSize,
int64_t srcSelectDimSize) {
// In order to avoid reloading the index that we are copying, load
// it once to handle all of the points that are being selected, so
// it can be reused as much as possible. This kernel is chosen when
// this is a good choice (small number of chosen indices), since
// re-accessing indices in addition to src elements can be slow.
for (IndexType dstIndex = 0; dstIndex < indices.sizes[0]; ++dstIndex) {
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(srcIndex < srcSelectDimSize);
// We stride over the output ignoring the indexed dimension
// (innerSize), whose offset calculation is handled differently
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < innerSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(linearIndex, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(linearIndex, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
}
// We prefer this kernel to balance parallelism across index points,
// if there are a large number of indices.
// This kernel in fact works for all choices of problem size, but if
// the number of indices chosen is small, then the
// indexSelectSmallIndex kernel is a better choice to reduce memory
// accesses.
template <typename T, typename IndexType, int DstDim, int SrcDim, int IdxDim,
bool IndexIsMajor>
__global__ void indexSelectLargeIndex(TensorInfo<T, IndexType> dst,
TensorInfo<T, IndexType> src,
TensorInfo<int64_t, IndexType> indices,
int dstSelectDim,
int srcSelectDim,
IndexType totalSize,
IndexType innerSize,
int64_t srcSelectDimSize) {
// We stride over the output including the indexed dimension
// (totalSize), and calculate the destination index point based on that
for (IndexType linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < totalSize;
linearIndex += gridDim.x * blockDim.x) {
IndexType dstIndex, elementInSlice;
if (IndexIsMajor) {
dstIndex = linearIndex / innerSize;
elementInSlice = linearIndex % innerSize;
}
else {
elementInSlice = linearIndex / innerSize;
dstIndex = linearIndex % innerSize;
}
// Lua indices begin at 1
IndexType srcIndex =
indices.data[IndexToOffset<int64_t, IndexType, IdxDim>::get(dstIndex, indices)] - TH_INDEX_BASE;
assert(srcIndex < srcSelectDimSize);
IndexType dstOffset =
IndexToOffset<T, IndexType, DstDim>::get(elementInSlice, dst);
dstOffset += dstIndex * dst.strides[dstSelectDim];
IndexType srcOffset =
IndexToOffset<T, IndexType, SrcDim>::get(elementInSlice, src);
srcOffset += srcIndex * src.strides[srcSelectDim];
dst.data[dstOffset] = src.data[srcOffset];
}
}
template <int Dims, typename T, typename IndexType>
__device__ __forceinline__ IndexType indexToOffset(
const TensorInfo<T, IndexType>& info,
int64_t index,
IndexType size)
{
IndexType linearIndex = static_cast<IndexType>(index);
assert(linearIndex < size && linearIndex >= -size);
if (linearIndex < 0) {
linearIndex += size;
}
return IndexToOffset<T, IndexType, Dims>::get(linearIndex, info) - TH_INDEX_BASE;
}
struct WrapIndexOp {
WrapIndexOp(int64_t size) : size(size) {}
__device__ __forceinline__ void operator()(int64_t* out, int64_t* in) {
auto idx = *in;
assert(idx < size && idx >= -size);
*out = idx < 0 ? idx + size : idx;
}
int64_t size;
};
template <typename T, typename IndexType, int Dims>
struct TensorTakeOp {
TensorTakeOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* out, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
*out = info.data[offset];
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutOp {
TensorPutOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t*, int64_t*)
: info(info), numel(numel) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
auto offset = indexToOffset<Dims>(info, *index, numel);
info.data[offset] = *value;
}
const TensorInfo<T, IndexType> info;
IndexType numel;
};
template <typename T, typename IndexType, int Dims>
struct TensorPutAccumulateOp {
TensorPutAccumulateOp(TensorInfo<T, IndexType> info, IndexType numel, int64_t* start, int64_t* end)
: info(info), numel(numel), start(start), end(end) {}
__device__ __forceinline__ void operator()(T* value, int64_t* index) {
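    // indices are assumed sorted (duplicates adjacent): only the first element of
    // each run of equal indices accumulates the whole run, avoiding conflicting writes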
if (index == start || *index != *(index - 1)) {
int64_t linear_index = *index;
auto offset = indexToOffset<Dims>(info, linear_index, numel);
do {
info.data[offset] = THCNumerics<T>::add(info.data[offset], *value);
index++;
value++;
} while (index != end && *index == linear_index);
}
}
const TensorInfo<T, IndexType> info;
IndexType numel;
int64_t* start;
int64_t* end;
};
template<typename IndexType, typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePutImpl(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
// These are only valid if index is contiguous
auto start = THCudaLongTensor_data(state, index);
auto end = start + THCudaLongTensor_numel(state, index);
auto aInfo = getTensorInfo<T, TensorType, IndexType>(state, a);
aInfo.collapseDims();
auto numel = THCTensor_nElement(state, a);
if (aInfo.isContiguous()) {
auto op = Op<T, IndexType, -2>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
} else {
auto op = Op<T, IndexType, -1>(aInfo, numel, start, end);
THC_pointwiseApply2<T, int64_t>(state, b, index, op);
}
}
template<typename T, template<class, class, int> class Op, typename TensorType>
void dispatchTakePut(THCState *state, TensorType *a, TensorType *b, THCudaLongTensor *index) {
if (THCTensor_canUse32BitIndexMath(state, a, INT_MAX)) {
dispatchTakePutImpl<int32_t, T, Op>(state, a, b, index);
} else {
dispatchTakePutImpl<int64_t, T, Op>(state, a, b, index);
}
}
#include "generic/THCTensorIndex.cu"
#include "THCGenerateAllTypes.h"
|
b538d8e8487066bd1cb82d848db4501b27bf49bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mex.h"
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
#define BLOCKSIZE_Z 4
__host__ void host_diff(float *img1, float *img, int nx, int ny, int nz, int ind);
__global__ void kernel_diff(float *img1, float *img, int nx, int ny, int nz, int ind);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
// Macro for input and output
#define IN_IMG prhs[0]
#define GEO_PARA prhs[1]
#define OUT_IMG plhs[0]
int nx, ny, nz, ind;
// resolutions of volumes
if (mxGetField(GEO_PARA, 0, "nx") != NULL)
nx = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nx"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nx.\n");
if (mxGetField(GEO_PARA, 0, "ny") != NULL)
ny = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ny"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution ny.\n");
if (mxGetField(GEO_PARA, 0, "nz") != NULL)
nz = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nz"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nz.\n");
int numImg = nx * ny * nz; // size of image
int numBytesImg = numImg * sizeof(float); // number of bytes in image
if (mxGetField(GEO_PARA, 0, "ind") != NULL)
ind = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ind"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found ind.\n");
float *h_img;
h_img = (float*)mxGetData(IN_IMG);
float *d_img, *d_img1;
hipMalloc((void**)&d_img, numBytesImg);
hipMalloc((void**)&d_img1, numBytesImg);
hipMemcpy(d_img, h_img, numBytesImg, hipMemcpyHostToDevice);
host_diff(d_img1, d_img, nx, ny, nz, ind);
OUT_IMG = mxCreateNumericMatrix(0, 0, mxSINGLE_CLASS, mxREAL);
const mwSize outDim[3] = {(mwSize)nx, (mwSize)ny, (mwSize)nz};
mxSetDimensions(OUT_IMG, outDim, 3);
mxSetData(OUT_IMG, mxMalloc(numBytesImg));
float *h_outimg = (float*)mxGetData(OUT_IMG);
hipMemcpy(h_outimg, d_img1, numBytesImg, hipMemcpyDeviceToHost);
hipFree(d_img1);
hipFree(d_img);
hipDeviceReset();
return;
}
__host__ void host_diff(float *img1, float *img, int nx, int ny, int nz, int ind)
{
const dim3 gridSize((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z);
const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);
hipLaunchKernelGGL(( kernel_diff), dim3(gridSize), dim3(blockSize), 0, 0, img1, img, nx, ny, nz, ind);
hipDeviceSynchronize();
}
__global__ void kernel_diff(float *img1, float *img, int nx, int ny, int nz, int ind)
{
int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x;
int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y;
int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z;
if (ix >= nx || iy >= ny || iz >= nz)
return;
int id = ix + iy * nx + iz * nx * ny;
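    // ind selects the axis of the backward difference (1 = x, 2 = y, 3 = z);
    // voxels on either boundary of that axis are set to zero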
switch (ind)
{
case 1:
if (ix == nx - 1)
img1[id] = 0.0f;
else if (ix == 0)
img1[id] = 0.0f;
else
img1[id] = img[id] - img[id - 1];
break;
case 2:
if (iy == ny - 1)
img1[id] = 0.0f;
else if (iy == 0)
img1[id] = 0.0f;
else
img1[id] = img[id] - img[id - nx];
break;
case 3:
if (iz == nz - 1)
img1[id] = 0.0f;
else if (iz == 0)
img1[id] = 0.0f;
else
img1[id] = img[id] - img[id - nx * ny];
break;
}
}
|
b538d8e8487066bd1cb82d848db4501b27bf49bb.cu
|
#include "mex.h"
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
#define BLOCKSIZE_Z 4
__host__ void host_diff(float *img1, float *img, int nx, int ny, int nz, int ind);
__global__ void kernel_diff(float *img1, float *img, int nx, int ny, int nz, int ind);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
// Macro for input and output
#define IN_IMG prhs[0]
#define GEO_PARA prhs[1]
#define OUT_IMG plhs[0]
int nx, ny, nz, ind;
// resolutions of volumes
if (mxGetField(GEO_PARA, 0, "nx") != NULL)
nx = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nx"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nx.\n");
if (mxGetField(GEO_PARA, 0, "ny") != NULL)
ny = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ny"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution ny.\n");
if (mxGetField(GEO_PARA, 0, "nz") != NULL)
nz = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "nz"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found valid volume resolution nz.\n");
int numImg = nx * ny * nz; // size of image
int numBytesImg = numImg * sizeof(float); // number of bytes in image
if (mxGetField(GEO_PARA, 0, "ind") != NULL)
ind = (int)mxGetScalar(mxGetField(GEO_PARA, 0, "ind"));
else
mexErrMsgIdAndTxt("MATLAB:badInput","Can't found ind.\n");
float *h_img;
h_img = (float*)mxGetData(IN_IMG);
float *d_img, *d_img1;
cudaMalloc((void**)&d_img, numBytesImg);
cudaMalloc((void**)&d_img1, numBytesImg);
cudaMemcpy(d_img, h_img, numBytesImg, cudaMemcpyHostToDevice);
host_diff(d_img1, d_img, nx, ny, nz, ind);
OUT_IMG = mxCreateNumericMatrix(0, 0, mxSINGLE_CLASS, mxREAL);
const mwSize outDim[3] = {(mwSize)nx, (mwSize)ny, (mwSize)nz};
mxSetDimensions(OUT_IMG, outDim, 3);
mxSetData(OUT_IMG, mxMalloc(numBytesImg));
float *h_outimg = (float*)mxGetData(OUT_IMG);
cudaMemcpy(h_outimg, d_img1, numBytesImg, cudaMemcpyDeviceToHost);
cudaFree(d_img1);
cudaFree(d_img);
cudaDeviceReset();
return;
}
__host__ void host_diff(float *img1, float *img, int nx, int ny, int nz, int ind)
{
const dim3 gridSize((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z);
const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);
kernel_diff<<<gridSize, blockSize>>>(img1, img, nx, ny, nz, ind);
cudaDeviceSynchronize();
}
__global__ void kernel_diff(float *img1, float *img, int nx, int ny, int nz, int ind)
{
int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x;
int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y;
int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z;
if (ix >= nx || iy >= ny || iz >= nz)
return;
int id = ix + iy * nx + iz * nx * ny;
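    // ind selects the axis of the backward difference (1 = x, 2 = y, 3 = z);
    // voxels on either boundary of that axis are set to zero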
switch (ind)
{
case 1:
if (ix == nx - 1)
img1[id] = 0.0f;
else if (ix == 0)
img1[id] = 0.0f;
else
img1[id] = img[id] - img[id - 1];
break;
case 2:
if (iy == ny - 1)
img1[id] = 0.0f;
else if (iy == 0)
img1[id] = 0.0f;
else
img1[id] = img[id] - img[id - nx];
break;
case 3:
if (iz == nz - 1)
img1[id] = 0.0f;
else if (iz == 0)
img1[id] = 0.0f;
else
img1[id] = img[id] - img[id - nx * ny];
break;
}
}
|
d556e8a62598e858dfe9ea15aeccff479be25c43.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_impl.cuh"
constexpr int NUM_PER_THREAD_REDUCE = 4;
constexpr int WARP_SIZE = 32;
template <typename T>
inline __device__ void MeanAndVarAccumulation(T *mean, T *var, T *num, const T &val) {
// Welford Algorithm:
// \mu_k = \mu_{k-1} + (x_k - \mu_{k-1})/k
// \sigma_k^2 = \sigma_{k-1}^2 + (x_k - \mu_{k-1}) * (x_k - \mu_k)
num[0]++;
T mean_new = mean[0] + (val - mean[0]) / num[0];
var[0] = var[0] + (val - mean[0]) * (val - mean_new);
mean[0] = mean_new;
}
template <typename T>
inline __device__ void MeanAndVarMerge(T *m1, T *v1, T *n1, const T &m2, const T &v2, const T &n2) {
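  // merge two partial (mean, M2, count) statistics (Chan et al. parallel-variance
  // update), so partial results from different threads/warps can be combined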
T zero = 0;
if (n2 == zero) {
return;
}
T count = n1[0] + n2;
v1[0] = v1[0] + v2 + (m1[0] - m2) * (m1[0] - m2) * n1[0] * n2 / count;
m1[0] = (n1[0] * m1[0] + n2 * m2) / count;
n1[0] = count;
}
template <typename T>
inline __device__ void ThreadReduce(const int &col_dim, const T *block_addr, T *mean, T *var, T *num) {
int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
int pos = NUM_PER_THREAD_REDUCE * i + j;
if (pos >= col_dim) {
return;
}
MeanAndVarAccumulation(mean, var, num, block_addr[pos]);
}
}
}
template <typename T>
inline __device__ void WarpReduce(T *mean, T *var, T *num) {
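  // shuffle-down tree reduction: each step merges partials from the lane
  // `delta` positions higher, so lane 0 ends up with the warp-level result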
for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) {
T mean_other = __shfl_down_sync(0xffffffff, mean[0], delta);
T var_other = __shfl_down_sync(0xffffffff, var[0], delta);
T num_other = __shfl_down_sync(0xffffffff, num[0], delta);
MeanAndVarMerge(mean, var, num, mean_other, var_other, num_other);
}
}
template <typename T>
inline __device__ void BlockReduce(const int &col_dim, T *mean, T *var, T *num, T *mean_addr, T *var_addr,
T *share_mem) {
  // load data to shared memory
  // threads (0, 32, 64, 96, ...) keep the data
if (threadIdx.x % WARP_SIZE == 0) {
int offset = threadIdx.x / WARP_SIZE * 3;
share_mem[offset] = mean[0];
share_mem[offset + 1] = var[0];
share_mem[offset + 2] = num[0];
}
__syncthreads();
for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) {
if (threadIdx.x < stride) {
int offset = (threadIdx.x + stride) * 3;
MeanAndVarMerge(&share_mem[threadIdx.x * 3], &share_mem[threadIdx.x * 3 + 1], &share_mem[threadIdx.x * 3 + 2],
share_mem[offset], share_mem[offset + 1], share_mem[offset + 2]);
}
}
__syncthreads();
if (threadIdx.x == 0) {
mean_addr[blockIdx.x] = share_mem[0];
share_mem[1] /= col_dim;
var_addr[blockIdx.x] = share_mem[1];
}
}
template <typename T>
inline __device__ void LayerNorm(const int &row, const int &col_dim, const int ¶m_dim, const T *x,
const T *share_mem, const T *gamma, const T *beta, const T epsilon, T *y) {
for (int col = threadIdx.x; col < col_dim; col += blockDim.x) {
int pos = row * col_dim + col;
int i = pos % param_dim;
y[pos] = (x[pos] - share_mem[0]) / sqrt(share_mem[1] + epsilon) * gamma[i] + beta[i];
}
}
template <>
inline __device__ void LayerNorm(const int &row, const int &col_dim, const int ¶m_dim, const half *x,
const half *share_mem, const half *gamma, const half *beta, const half epsilon,
half *y) {
for (int col = threadIdx.x; col < col_dim; col += blockDim.x) {
int pos = row * col_dim + col;
int i = pos % param_dim;
y[pos] = (x[pos] - share_mem[0]) / hsqrt(share_mem[1] + epsilon) * gamma[i] + beta[i];
}
}
template <typename T>
__global__ void LayerNormKernel(const int row_dim, const int col_dim, const int param_dim, const T epsilon, const T *x,
const T *gamma, const T *beta, T *y, T *mean_addr, T *var_addr) {
for (auto row = blockIdx.x; row < row_dim; row += gridDim.x) {
T mean = 0;
T var = 0;
T num = 0;
const T *block_addr = x + row * col_dim;
DynamicSharedMem<T> share_mem;
ThreadReduce(col_dim, block_addr, &mean, &var, &num);
WarpReduce(&mean, &var, &num);
BlockReduce(col_dim, &mean, &var, &num, mean_addr, var_addr, share_mem.addr());
__syncthreads();
LayerNorm(row, col_dim, param_dim, x, share_mem.addr(), gamma, beta, epsilon, y);
}
}
template <typename T>
void LayerNorm(const int &row_dim, const int &col_dim, const int ¶m_dim, const T &epsilon, const T *x,
const T *gamma, const T *beta, T *y, T *mean, T *var, hipStream_t stream) {
const int thread_per_block = 256;
// keep the mean/var/num after warp reduce
int share_mem_size = thread_per_block / WARP_SIZE * 3 * sizeof(T);
hipLaunchKernelGGL(( LayerNormKernel), dim3(row_dim), dim3(thread_per_block), share_mem_size, stream, row_dim, col_dim, param_dim, epsilon, x, gamma,
beta, y, mean, var);
}
template void LayerNorm(const int &row_dim, const int &col_dim, const int ¶m_dim, const float &epsilon,
const float *x, const float *gamma, const float *beta, float *y, float *mean, float *var,
hipStream_t stream);
template void LayerNorm(const int &row_dim, const int &col_dim, const int ¶m_dim, const half &epsilon,
const half *x, const half *gamma, const half *beta, half *y, half *mean, half *var,
hipStream_t stream);
|
d556e8a62598e858dfe9ea15aeccff479be25c43.cu
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_impl.cuh"
constexpr int NUM_PER_THREAD_REDUCE = 4;
constexpr int WARP_SIZE = 32;
template <typename T>
inline __device__ void MeanAndVarAccumulation(T *mean, T *var, T *num, const T &val) {
// Welford Algorithm:
// \mu_k = \mu_{k-1} + (x_k - \mu_{k-1})/k
// \sigma_k^2 = \sigma_{k-1}^2 + (x_k - \mu_{k-1}) * (x_k - \mu_k)
num[0]++;
T mean_new = mean[0] + (val - mean[0]) / num[0];
var[0] = var[0] + (val - mean[0]) * (val - mean_new);
mean[0] = mean_new;
}
template <typename T>
inline __device__ void MeanAndVarMerge(T *m1, T *v1, T *n1, const T &m2, const T &v2, const T &n2) {
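  // merge two partial (mean, M2, count) statistics (Chan et al. parallel-variance
  // update), so partial results from different threads/warps can be combined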
T zero = 0;
if (n2 == zero) {
return;
}
T count = n1[0] + n2;
v1[0] = v1[0] + v2 + (m1[0] - m2) * (m1[0] - m2) * n1[0] * n2 / count;
m1[0] = (n1[0] * m1[0] + n2 * m2) / count;
n1[0] = count;
}
template <typename T>
inline __device__ void ThreadReduce(const int &col_dim, const T *block_addr, T *mean, T *var, T *num) {
int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
int pos = NUM_PER_THREAD_REDUCE * i + j;
if (pos >= col_dim) {
return;
}
MeanAndVarAccumulation(mean, var, num, block_addr[pos]);
}
}
}
template <typename T>
inline __device__ void WarpReduce(T *mean, T *var, T *num) {
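  // shuffle-down tree reduction: each step merges partials from the lane
  // `delta` positions higher, so lane 0 ends up with the warp-level result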
for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) {
T mean_other = __shfl_down_sync(0xffffffff, mean[0], delta);
T var_other = __shfl_down_sync(0xffffffff, var[0], delta);
T num_other = __shfl_down_sync(0xffffffff, num[0], delta);
MeanAndVarMerge(mean, var, num, mean_other, var_other, num_other);
}
}
template <typename T>
inline __device__ void BlockReduce(const int &col_dim, T *mean, T *var, T *num, T *mean_addr, T *var_addr,
T *share_mem) {
  // load data to shared memory
  // threads (0, 32, 64, 96, ...) keep the data
if (threadIdx.x % WARP_SIZE == 0) {
int offset = threadIdx.x / WARP_SIZE * 3;
share_mem[offset] = mean[0];
share_mem[offset + 1] = var[0];
share_mem[offset + 2] = num[0];
}
__syncthreads();
for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) {
if (threadIdx.x < stride) {
int offset = (threadIdx.x + stride) * 3;
MeanAndVarMerge(&share_mem[threadIdx.x * 3], &share_mem[threadIdx.x * 3 + 1], &share_mem[threadIdx.x * 3 + 2],
share_mem[offset], share_mem[offset + 1], share_mem[offset + 2]);
}
}
__syncthreads();
if (threadIdx.x == 0) {
mean_addr[blockIdx.x] = share_mem[0];
share_mem[1] /= col_dim;
var_addr[blockIdx.x] = share_mem[1];
}
}
template <typename T>
inline __device__ void LayerNorm(const int &row, const int &col_dim, const int ¶m_dim, const T *x,
const T *share_mem, const T *gamma, const T *beta, const T epsilon, T *y) {
for (int col = threadIdx.x; col < col_dim; col += blockDim.x) {
int pos = row * col_dim + col;
int i = pos % param_dim;
y[pos] = (x[pos] - share_mem[0]) / sqrt(share_mem[1] + epsilon) * gamma[i] + beta[i];
}
}
template <>
inline __device__ void LayerNorm(const int &row, const int &col_dim, const int ¶m_dim, const half *x,
const half *share_mem, const half *gamma, const half *beta, const half epsilon,
half *y) {
for (int col = threadIdx.x; col < col_dim; col += blockDim.x) {
int pos = row * col_dim + col;
int i = pos % param_dim;
y[pos] = (x[pos] - share_mem[0]) / hsqrt(share_mem[1] + epsilon) * gamma[i] + beta[i];
}
}
template <typename T>
__global__ void LayerNormKernel(const int row_dim, const int col_dim, const int param_dim, const T epsilon, const T *x,
const T *gamma, const T *beta, T *y, T *mean_addr, T *var_addr) {
for (auto row = blockIdx.x; row < row_dim; row += gridDim.x) {
T mean = 0;
T var = 0;
T num = 0;
const T *block_addr = x + row * col_dim;
DynamicSharedMem<T> share_mem;
ThreadReduce(col_dim, block_addr, &mean, &var, &num);
WarpReduce(&mean, &var, &num);
BlockReduce(col_dim, &mean, &var, &num, mean_addr, var_addr, share_mem.addr());
__syncthreads();
LayerNorm(row, col_dim, param_dim, x, share_mem.addr(), gamma, beta, epsilon, y);
}
}
template <typename T>
void LayerNorm(const int &row_dim, const int &col_dim, const int ¶m_dim, const T &epsilon, const T *x,
const T *gamma, const T *beta, T *y, T *mean, T *var, cudaStream_t stream) {
const int thread_per_block = 256;
// keep the mean/var/num after warp reduce
int share_mem_size = thread_per_block / WARP_SIZE * 3 * sizeof(T);
LayerNormKernel<<<row_dim, thread_per_block, share_mem_size, stream>>>(row_dim, col_dim, param_dim, epsilon, x, gamma,
beta, y, mean, var);
}
template void LayerNorm(const int &row_dim, const int &col_dim, const int ¶m_dim, const float &epsilon,
const float *x, const float *gamma, const float *beta, float *y, float *mean, float *var,
cudaStream_t stream);
template void LayerNorm(const int &row_dim, const int &col_dim, const int ¶m_dim, const half &epsilon,
const half *x, const half *gamma, const half *beta, half *y, half *mean, half *var,
cudaStream_t stream);
|
vector.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <type_traits>
#include <wmma_extension/mma_simt.hpp>
#include "utils.hpp"
#ifdef WMMAE_USE_NVCUDA_NAMESPACE
namespace f32_namespace = nvcuda;
#else
namespace f32_namespace = mtk;
#endif
__device__ half abs(const half a) {
if (__half2float(a) < 0) {
return -a;
}
return a;
}
/// Load
template <class Use, int m, int n, int k, class T, class Layout>
__global__ void load_vector_ab_test_kernel(
const float* const cor_ptr,
const float* const src_ptr
) {
mtk::wmma::mma_simt::fragment<Use, m, n, k, T, Layout> frag, frag_c;
mtk::wmma::mma_simt::fill_fragment(frag, 0.0f);
mtk::wmma::mma_simt::load_vector(frag, src_ptr);
constexpr unsigned mem_m = mtk::wmma::mma_simt::detail::select_value<Use, m, k, m>::value;
mtk::wmma::mma_simt::load_matrix_sync(frag_c, cor_ptr, mem_m);
float max_error = 0;
for (unsigned i = 0; i < frag.num_elements; i++) {
max_error = max(max_error, abs(mtk::wmma::mma_simt::detail::cast<float>(frag.x[i] - frag_c.x[i])));
}
for (unsigned i = 0; i < mtk::test_utils::warp_size; i++) {
__syncthreads();
if (i == threadIdx.x) printf("[%u] %e\n", i, max_error);
}
}
template <int m, int n, int k, class T>
__global__ void load_vector_acc_test_kernel(
const float* const cor_ptr,
const float* const src_ptr,
const nvcuda::wmma::layout_t layout
) {
mtk::wmma::mma_simt::fragment<nvcuda::wmma::accumulator, m, n, k, T, void> frag, frag_c;
mtk::wmma::mma_simt::fill_fragment(frag, 0.0f);
mtk::wmma::mma_simt::load_vector(frag, src_ptr, layout);
constexpr unsigned mem_m = m;
mtk::wmma::mma_simt::load_matrix_sync(frag_c, cor_ptr, mem_m, layout);
float max_error = 0;
for (unsigned i = 0; i < frag.num_elements; i++) {
max_error = max(max_error, abs(mtk::wmma::mma_simt::detail::cast<float>(frag.x[i] - frag_c.x[i])));
}
for (unsigned i = 0; i < mtk::test_utils::warp_size; i++) {
__syncthreads();
if (i == threadIdx.x) printf("[%u] %e\n", i, max_error);
}
}
template <class Use, int m, int n, int k, class T, class Layout>
void load_vector_test() {
std::printf("!-- %s\n", __func__);
std::printf("Use : %s\n", mtk::test_utils::to_string<Use>().c_str());
std::printf("Layout : %s\n", mtk::test_utils::to_string<Layout>().c_str());
std::printf("Type : %s\n", mtk::test_utils::to_string<T>().c_str());
std::printf("Size : %u, %u, %u\n", m, n, k);
std::printf("Shape : <%2u,%2u,%2u>\n",
m,
n,
k);
constexpr unsigned mem_m = mtk::wmma::mma_simt::detail::select_value<Use, m, k, m>::value;
constexpr unsigned mem_n = mtk::wmma::mma_simt::detail::select_value<Use, k, n, n>::value;
constexpr auto vec_len = std::is_same<Layout, nvcuda::wmma::col_major>::value ? mem_m : mem_n;
float* vec_mem;
float* mat_mem;
hipHostMalloc(&mat_mem, sizeof(float) * mem_m * mem_n);
hipHostMalloc(&vec_mem, sizeof(float) * vec_len);
for (unsigned i = 0; i < vec_len; i++) {
vec_mem[i] = i;
}
for (unsigned i = 0; i < mem_m * mem_n; i++) {
mat_mem[i] = 0.f;
}
for (unsigned i = 0; i < vec_len; i++) {
mat_mem[i] = vec_mem[i];
}
if constexpr (std::is_same<Use, nvcuda::wmma::accumulator>::value) {
const auto layout = (std::is_same<nvcuda::wmma::col_major, Layout>::value) ? nvcuda::wmma::mem_col_major : nvcuda::wmma::mem_row_major;
hipLaunchKernelGGL(( load_vector_acc_test_kernel<m, n, k, T>), dim3(1), dim3(mtk::test_utils::warp_size), 0, 0, mat_mem, vec_mem, layout);
} else {
hipLaunchKernelGGL(( load_vector_ab_test_kernel<Use, m, n, k, T, Layout>), dim3(1), dim3(mtk::test_utils::warp_size), 0, 0, mat_mem, vec_mem);
}
hipDeviceSynchronize();
	hipHostFree(vec_mem);
	hipHostFree(mat_mem);
}
/// Store
template <int m, int n, int k, class T>
__global__ void store_vector_acc_test_kernel(
float* const dst_ptr,
const float* const src_ptr,
const nvcuda::wmma::layout_t layout
) {
mtk::wmma::mma_simt::fragment<nvcuda::wmma::accumulator, m, n, k, T, void> frag;
constexpr unsigned mem_m = m;
mtk::wmma::mma_simt::load_matrix_sync(frag, src_ptr, mem_m, layout);
mtk::wmma::mma_simt::store_vector(dst_ptr, frag, layout);
}
template <class Use, int m, int n, int k, class T, class Layout>
void store_vector_test() {
std::printf("!-- %s\n", __func__);
std::printf("Use : %s\n", mtk::test_utils::to_string<Use>().c_str());
std::printf("Layout : %s\n", mtk::test_utils::to_string<Layout>().c_str());
std::printf("Type : %s\n", mtk::test_utils::to_string<T>().c_str());
std::printf("Size : %u, %u, %u\n", m, n, k);
std::printf("Shape : <%2u,%2u,%2u>\n",
m,
n,
k);
constexpr unsigned mem_m = mtk::wmma::mma_simt::detail::select_value<Use, m, k, m>::value;
constexpr unsigned mem_n = mtk::wmma::mma_simt::detail::select_value<Use, k, n, n>::value;
constexpr auto vec_len = std::is_same<Layout, nvcuda::wmma::col_major>::value ? mem_m : mem_n;
float* vec_mem;
float* res_mem;
float* mat_mem;
hipHostMalloc(&mat_mem, sizeof(float) * mem_m * mem_n);
hipHostMalloc(&vec_mem, sizeof(float) * vec_len);
hipHostMalloc(&res_mem, sizeof(float) * vec_len);
for (unsigned i = 0; i < vec_len; i++) {
vec_mem[i] = i;
}
for (unsigned i = 0; i < mem_m * mem_n; i++) {
mat_mem[i] = 0.f;
}
for (unsigned i = 0; i < vec_len; i++) {
mat_mem[i] = vec_mem[i];
}
const auto layout = (std::is_same<nvcuda::wmma::col_major, Layout>::value) ? nvcuda::wmma::mem_col_major : nvcuda::wmma::mem_row_major;
hipLaunchKernelGGL(( store_vector_acc_test_kernel<m, n, k, T>), dim3(1), dim3(mtk::test_utils::warp_size), 0, 0, mat_mem, mat_mem, layout);
hipDeviceSynchronize();
float max_error = 0.0f;
for (unsigned i = 0; i < vec_len; i++) {
const auto diff = mat_mem[i] - vec_mem[i];
max_error = ::max(max_error, std::abs(diff));
}
std::printf("Error : %e\n", max_error);
	hipHostFree(res_mem);
	hipHostFree(vec_mem);
	hipHostFree(mat_mem);
}
int main() {
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, half , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, half , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, half , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, half , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, half , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, half , nvcuda::wmma::row_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, half , nvcuda::wmma::col_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, half , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, float , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, float , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, float , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, float , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, float , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, float , nvcuda::wmma::row_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, float , nvcuda::wmma::col_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, float , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, double, nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, double, nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, double, nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, double, nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, double, nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, double, nvcuda::wmma::row_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, double, nvcuda::wmma::col_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, double, nvcuda::wmma::row_major>();
}
|
vector.cu
|
#include <iostream>
#include <type_traits>
#include <wmma_extension/mma_simt.hpp>
#include "utils.hpp"
#ifdef WMMAE_USE_NVCUDA_NAMESPACE
namespace f32_namespace = nvcuda;
#else
namespace f32_namespace = mtk;
#endif
__device__ half abs(const half a) {
if (__half2float(a) < 0) {
return -a;
}
return a;
}
/// Load
template <class Use, int m, int n, int k, class T, class Layout>
__global__ void load_vector_ab_test_kernel(
const float* const cor_ptr,
const float* const src_ptr
) {
mtk::wmma::mma_simt::fragment<Use, m, n, k, T, Layout> frag, frag_c;
mtk::wmma::mma_simt::fill_fragment(frag, 0.0f);
mtk::wmma::mma_simt::load_vector(frag, src_ptr);
constexpr unsigned mem_m = mtk::wmma::mma_simt::detail::select_value<Use, m, k, m>::value;
mtk::wmma::mma_simt::load_matrix_sync(frag_c, cor_ptr, mem_m);
float max_error = 0;
for (unsigned i = 0; i < frag.num_elements; i++) {
max_error = max(max_error, abs(mtk::wmma::mma_simt::detail::cast<float>(frag.x[i] - frag_c.x[i])));
}
for (unsigned i = 0; i < mtk::test_utils::warp_size; i++) {
__syncthreads();
if (i == threadIdx.x) printf("[%u] %e\n", i, max_error);
}
}
template <int m, int n, int k, class T>
__global__ void load_vector_acc_test_kernel(
const float* const cor_ptr,
const float* const src_ptr,
const nvcuda::wmma::layout_t layout
) {
mtk::wmma::mma_simt::fragment<nvcuda::wmma::accumulator, m, n, k, T, void> frag, frag_c;
mtk::wmma::mma_simt::fill_fragment(frag, 0.0f);
mtk::wmma::mma_simt::load_vector(frag, src_ptr, layout);
constexpr unsigned mem_m = m;
mtk::wmma::mma_simt::load_matrix_sync(frag_c, cor_ptr, mem_m, layout);
float max_error = 0;
for (unsigned i = 0; i < frag.num_elements; i++) {
max_error = max(max_error, abs(mtk::wmma::mma_simt::detail::cast<float>(frag.x[i] - frag_c.x[i])));
}
for (unsigned i = 0; i < mtk::test_utils::warp_size; i++) {
__syncthreads();
if (i == threadIdx.x) printf("[%u] %e\n", i, max_error);
}
}
template <class Use, int m, int n, int k, class T, class Layout>
void load_vector_test() {
std::printf("!-- %s\n", __func__);
std::printf("Use : %s\n", mtk::test_utils::to_string<Use>().c_str());
std::printf("Layout : %s\n", mtk::test_utils::to_string<Layout>().c_str());
std::printf("Type : %s\n", mtk::test_utils::to_string<T>().c_str());
std::printf("Size : %u, %u, %u\n", m, n, k);
std::printf("Shape : <%2u,%2u,%2u>\n",
m,
n,
k);
constexpr unsigned mem_m = mtk::wmma::mma_simt::detail::select_value<Use, m, k, m>::value;
constexpr unsigned mem_n = mtk::wmma::mma_simt::detail::select_value<Use, k, n, n>::value;
constexpr auto vec_len = std::is_same<Layout, nvcuda::wmma::col_major>::value ? mem_m : mem_n;
float* vec_mem;
float* mat_mem;
cudaMallocHost(&mat_mem, sizeof(float) * mem_m * mem_n);
cudaMallocHost(&vec_mem, sizeof(float) * vec_len);
for (unsigned i = 0; i < vec_len; i++) {
vec_mem[i] = i;
}
for (unsigned i = 0; i < mem_m * mem_n; i++) {
mat_mem[i] = 0.f;
}
for (unsigned i = 0; i < vec_len; i++) {
mat_mem[i] = vec_mem[i];
}
if constexpr (std::is_same<Use, nvcuda::wmma::accumulator>::value) {
const auto layout = (std::is_same<nvcuda::wmma::col_major, Layout>::value) ? nvcuda::wmma::mem_col_major : nvcuda::wmma::mem_row_major;
load_vector_acc_test_kernel<m, n, k, T><<<1, mtk::test_utils::warp_size>>>(mat_mem, vec_mem, layout);
} else {
load_vector_ab_test_kernel<Use, m, n, k, T, Layout><<<1, mtk::test_utils::warp_size>>>(mat_mem, vec_mem);
}
cudaDeviceSynchronize();
cudaFreeHost(vec_mem);
cudaFreeHost(mat_mem);
}
/// Store
template <int m, int n, int k, class T>
__global__ void store_vector_acc_test_kernel(
float* const dst_ptr,
const float* const src_ptr,
const nvcuda::wmma::layout_t layout
) {
mtk::wmma::mma_simt::fragment<nvcuda::wmma::accumulator, m, n, k, T, void> frag;
constexpr unsigned mem_m = m;
mtk::wmma::mma_simt::load_matrix_sync(frag, src_ptr, mem_m, layout);
mtk::wmma::mma_simt::store_vector(dst_ptr, frag, layout);
}
template <class Use, int m, int n, int k, class T, class Layout>
void store_vector_test() {
std::printf("!-- %s\n", __func__);
std::printf("Use : %s\n", mtk::test_utils::to_string<Use>().c_str());
std::printf("Layout : %s\n", mtk::test_utils::to_string<Layout>().c_str());
std::printf("Type : %s\n", mtk::test_utils::to_string<T>().c_str());
std::printf("Size : %u, %u, %u\n", m, n, k);
std::printf("Shape : <%2u,%2u,%2u>\n",
m,
n,
k);
constexpr unsigned mem_m = mtk::wmma::mma_simt::detail::select_value<Use, m, k, m>::value;
constexpr unsigned mem_n = mtk::wmma::mma_simt::detail::select_value<Use, k, n, n>::value;
constexpr auto vec_len = std::is_same<Layout, nvcuda::wmma::col_major>::value ? mem_m : mem_n;
float* vec_mem;
float* res_mem;
float* mat_mem;
cudaMallocHost(&mat_mem, sizeof(float) * mem_m * mem_n);
cudaMallocHost(&vec_mem, sizeof(float) * vec_len);
cudaMallocHost(&res_mem, sizeof(float) * vec_len);
for (unsigned i = 0; i < vec_len; i++) {
vec_mem[i] = i;
}
for (unsigned i = 0; i < mem_m * mem_n; i++) {
mat_mem[i] = 0.f;
}
for (unsigned i = 0; i < vec_len; i++) {
mat_mem[i] = vec_mem[i];
}
const auto layout = (std::is_same<nvcuda::wmma::col_major, Layout>::value) ? nvcuda::wmma::mem_col_major : nvcuda::wmma::mem_row_major;
store_vector_acc_test_kernel<m, n, k, T><<<1, mtk::test_utils::warp_size>>>(res_mem, mat_mem, layout);
cudaDeviceSynchronize();
float max_error = 0.0f;
for (unsigned i = 0; i < vec_len; i++) {
const auto diff = res_mem[i] - vec_mem[i];
max_error = std::max(max_error, std::abs(diff));
}
std::printf("Error : %e\n", max_error);
cudaFreeHost(res_mem);
cudaFreeHost(vec_mem);
cudaFreeHost(mat_mem);
}
int main() {
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, half , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, half , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, half , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, half , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, half , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, half , nvcuda::wmma::row_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, half , nvcuda::wmma::col_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, half , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, float , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, float , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, float , nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, float , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, float , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, float , nvcuda::wmma::row_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, float , nvcuda::wmma::col_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, float , nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, double, nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, double, nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, double, nvcuda::wmma::col_major>();
load_vector_test<nvcuda::wmma::matrix_a , 16, 16, 16, double, nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::matrix_b , 16, 16, 16, double, nvcuda::wmma::row_major>();
load_vector_test<nvcuda::wmma::accumulator , 16, 16, 16, double, nvcuda::wmma::row_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, double, nvcuda::wmma::col_major>();
store_vector_test<nvcuda::wmma::accumulator, 16, 16, 16, double, nvcuda::wmma::row_major>();
}
|
a2f078600db9c2786712c4963a024a599c3eeb2b.hip
|
// !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "matx.h"
#include "matx_filter.h"
#include <cassert>
#include <cstdio>
#include <cuda/std/ccomplex>
using namespace matx;
/**
* FFT Convolution
*
* This example shows how to perform an FFT convolution using the MatX library.
* The example shows the convolution theorem of:
*
* \f$ h*x \leftrightarrow H \cdot X \f$
*
* Namely, a convolution in the time domain is a point-wise multiplication in
* the frequency domain. In this example we start with two signals in the time
* domain, convert them to frequency domain, perform the multiply, then convert
* them back to the time domain. This should give very close results to
* performing a direct convolution in the time domain, so the results are
* compared to a direct convolution. They will not match identically since the
* types and order of operations are different, but they will match within a
* close margin.
*
* FFT convolution is frequently used in signal processing when a signal or
* filter is larger than a threshold, since it will outperform direct
* convolution past this threshold. Another benefit of FFT convolution is the
* number of operations is the same, regardless of the filter size. This allows
* a user to FFT a very long filter one time, and that buffer can be used many
* times for any incoming samples.
*
* For smaller signal sizes, the FFT convolution typically performs worse since
* there is some buffer and 3 FFT operations (2 for FFT of signal and filter,
* and 1 IFFT after the multiply) that causes the setup time to dominate.
*
*/
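// Sizing note: with signal_size = 1 << 16 = 65536 and filter_size = 16 below,
// the full linear convolution produces signal_size + filter_size - 1 = 65551
// samples, so both frequency-domain buffers are allocated at filtered_size and
// the FFT -> multiply -> IFFT result can be compared against conv1d run in
// MATX_C_MODE_FULL mode.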
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
MATX_ENTER_HANDLER();
using complex = cuda::std::complex<float>;
index_t signal_size = 1ULL << 16;
index_t filter_size = 16;
index_t filtered_size = signal_size + filter_size - 1;
// Create time domain buffers
tensor_t<complex, 1> sig_time({signal_size});
tensor_t<complex, 1> filt_time({filter_size});
tensor_t<complex, 1> time_out({filtered_size});
// Frequency domain buffers
tensor_t<complex, 1> sig_freq({filtered_size});
tensor_t<complex, 1> filt_freq({filtered_size});
// Fill the time domain signals with data
for (index_t i = 0; i < signal_size; i++) {
sig_time(i) = {-1.0f * (2.0f * static_cast<float>(i % 2) + 1.0f) *
(static_cast<float>(i % 10) / 10.0f) +
0.1f,
-1.0f * (static_cast<float>(i % 2) == 0.0f) *
(static_cast<float>(i % 10) / 5.0f) -
0.1f};
}
for (index_t i = 0; i < filter_size; i++) {
filt_time(i) = {static_cast<float>(i) / static_cast<float>(filter_size),
static_cast<float>(-i) / static_cast<float>(filter_size) +
0.5f};
}
// Prefetch the data we just created
sig_time.PrefetchDevice(0);
filt_time.PrefetchDevice(0);
// FFT the signal and filter into the frequency-domain buffers (implicitly zero-padded to filtered_size)
fft(sig_freq, sig_time);
fft(filt_freq, filt_time);
// Perform the pointwise multiply. Overwrite signal buffer with result
(sig_freq = sig_freq * filt_freq).run();
// IFFT in-place
ifft(sig_freq, sig_freq);
// Now the sig_freq view contains the full convolution result. Verify against
// a direct convolution
conv1d(time_out, sig_time, filt_time, matxConvCorrMode_t::MATX_C_MODE_FULL,
0);
hipStreamSynchronize(0);
// Compare signals
for (index_t i = 0; i < filtered_size; i++) {
if (fabs(time_out(i).real() - sig_freq(i).real()) > 0.001 ||
fabs(time_out(i).imag() - sig_freq(i).imag()) > 0.001) {
printf(
"Verification failed at item %lld. Direct=%f%+.2fj, FFT=%f%+.2fj\n",
i, time_out(i).real(), time_out(i).imag(), sig_freq(i).real(),
sig_freq(i).imag());
return -1;
}
}
std::cout << "Verification successful" << std::endl;
CUDA_CHECK_LAST_ERROR();
MATX_EXIT_HANDLER();
}
|
a2f078600db9c2786712c4963a024a599c3eeb2b.cu
|
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "matx.h"
#include "matx_filter.h"
#include <cassert>
#include <cstdio>
#include <cuda/std/ccomplex>
using namespace matx;
/**
* FFT Convolution
*
* This example shows how to perform an FFT convolution using the MatX library.
* The example shows the convolution theorem of:
*
* \f$ h*x \leftrightarrow H \cdot X \f$
*
* Namely, a convolution in the time domain is a point-wise multiplication in
* the frequency domain. In this example we start with two signals in the time
* domain, convert them to frequency domain, perform the multiply, then convert
* them back to the time domain. This should give very close results to
* performing a direct convolution in the time domain, so the results are
* compared to a direct convolution. They will not match identically since the
* types and order of operations are different, but they will match within a
* close margin.
*
* FFT convolution is frequently used in signal processing when a signal or
* filter is larger than a threshold, since it will outperform direct
* convolution past this threshold. Another benefit of FFT convolution is the
* number of operations is the same, regardless of the filter size. This allows
* a user to FFT a very long filter one time, and that buffer can be used many
* times for any incoming samples.
*
* For smaller signal sizes, the FFT convolution typically performs worse since
* there is some buffer and 3 FFT operations (2 for FFT of signal and filter,
* and 1 IFFT after the multiply) that causes the setup time to dominate.
*
*/
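// Sizing note: with signal_size = 1 << 16 = 65536 and filter_size = 16 below,
// the full linear convolution produces signal_size + filter_size - 1 = 65551
// samples, so both frequency-domain buffers are allocated at filtered_size and
// the FFT -> multiply -> IFFT result can be compared against conv1d run in
// MATX_C_MODE_FULL mode.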
int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv)
{
MATX_ENTER_HANDLER();
using complex = cuda::std::complex<float>;
index_t signal_size = 1ULL << 16;
index_t filter_size = 16;
index_t filtered_size = signal_size + filter_size - 1;
// Create time domain buffers
tensor_t<complex, 1> sig_time({signal_size});
tensor_t<complex, 1> filt_time({filter_size});
tensor_t<complex, 1> time_out({filtered_size});
// Frequency domain buffers
tensor_t<complex, 1> sig_freq({filtered_size});
tensor_t<complex, 1> filt_freq({filtered_size});
// Fill the time domain signals with data
for (index_t i = 0; i < signal_size; i++) {
sig_time(i) = {-1.0f * (2.0f * static_cast<float>(i % 2) + 1.0f) *
(static_cast<float>(i % 10) / 10.0f) +
0.1f,
-1.0f * (static_cast<float>(i % 2) == 0.0f) *
(static_cast<float>(i % 10) / 5.0f) -
0.1f};
}
for (index_t i = 0; i < filter_size; i++) {
filt_time(i) = {static_cast<float>(i) / static_cast<float>(filter_size),
static_cast<float>(-i) / static_cast<float>(filter_size) +
0.5f};
}
// Prefetch the data we just created
sig_time.PrefetchDevice(0);
filt_time.PrefetchDevice(0);
// FFT the signal and filter into the frequency-domain buffers (implicitly zero-padded to filtered_size)
fft(sig_freq, sig_time);
fft(filt_freq, filt_time);
// Perform the pointwise multiply. Overwrite signal buffer with result
(sig_freq = sig_freq * filt_freq).run();
// IFFT in-place
ifft(sig_freq, sig_freq);
// Now the sig_freq view contains the full convolution result. Verify against
// a direct convolution
conv1d(time_out, sig_time, filt_time, matxConvCorrMode_t::MATX_C_MODE_FULL,
0);
cudaStreamSynchronize(0);
// Compare signals
for (index_t i = 0; i < filtered_size; i++) {
if (fabs(time_out(i).real() - sig_freq(i).real()) > 0.001 ||
fabs(time_out(i).imag() - sig_freq(i).imag()) > 0.001) {
printf(
"Verification failed at item %lld. Direct=%f%+.2fj, FFT=%f%+.2fj\n",
i, time_out(i).real(), time_out(i).imag(), sig_freq(i).real(),
sig_freq(i).imag());
return -1;
}
}
std::cout << "Verification successful" << std::endl;
CUDA_CHECK_LAST_ERROR();
MATX_EXIT_HANDLER();
}
|
916f059138cc6074e45a958c0d71ad687c9d5e44.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from
// https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/modules/csrc/criss_cross_attention/ca_cuda.cu
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include "cc_attention_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void CAForwardCUDAKernelLauncher(const Tensor t, const Tensor f,
Tensor weight) {
AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
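// Launch shape: 32x32 thread tiles cover the h x w map, and the grid's
// z-dimension adds h + w slices for the criss-cross (row plus column) sweep.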
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_forward", [&] {
hipLaunchKernelGGL(( ca_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
THCudaCheck(hipGetLastError());
}
void CABackwardCUDAKernelLauncher(const Tensor dw, const Tensor t,
const Tensor f, Tensor dt, Tensor df) {
AT_ASSERTM(dw.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_backward_kernel_t", [&] {
hipLaunchKernelGGL(( ca_backward_kernel_t<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dw.contiguous().data_ptr<scalar_t>(),
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
dt.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(f.scalar_type(), "ca_backward_kernel_f", [&] {
hipLaunchKernelGGL(( ca_backward_kernel_f<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dw.contiguous().data_ptr<scalar_t>(),
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
df.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
THCudaCheck(hipGetLastError());
}
void CAMapForwardCUDAKernelLauncher(const Tensor weight, const Tensor g,
Tensor out) {
AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");
auto n = g.size(0);
auto c = g.size(1);
auto h = g.size(2);
auto w = g.size(3);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_forward", [&] {
hipLaunchKernelGGL(( ca_map_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
out.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
THCudaCheck(hipGetLastError());
}
void CAMapBackwardCUDAKernelLauncher(const Tensor dout, const Tensor weight,
const Tensor g, Tensor dw, Tensor dg) {
AT_ASSERTM(dout.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");
auto n = dout.size(0);
auto c = dout.size(1);
auto h = dout.size(2);
auto w = dout.size(3);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(
weight.scalar_type(), "ca_map_backward_kernel_w", [&] {
hipLaunchKernelGGL(( ca_map_backward_kernel_w<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dout.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
dw.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_backward_kernel_g", [&] {
hipLaunchKernelGGL(( ca_map_backward_kernel_g<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dout.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
dg.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
THCudaCheck(hipGetLastError());
}
|
916f059138cc6074e45a958c0d71ad687c9d5e44.cu
|
// Modified from
// https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/modules/csrc/criss_cross_attention/ca_cuda.cu
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include "cc_attention_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void CAForwardCUDAKernelLauncher(const Tensor t, const Tensor f,
Tensor weight) {
AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
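// Launch shape: 32x32 thread tiles cover the h x w map, and the grid's
// z-dimension adds h + w slices for the criss-cross (row plus column) sweep.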
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_forward", [&] {
ca_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
THCudaCheck(cudaGetLastError());
}
void CABackwardCUDAKernelLauncher(const Tensor dw, const Tensor t,
const Tensor f, Tensor dt, Tensor df) {
AT_ASSERTM(dw.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");
auto n = t.size(0);
auto c = t.size(1);
auto h = t.size(2);
auto w = t.size(3);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_backward_kernel_t", [&] {
ca_backward_kernel_t<scalar_t><<<blocks, threads, 0, stream>>>(
dw.contiguous().data_ptr<scalar_t>(),
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
dt.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(f.scalar_type(), "ca_backward_kernel_f", [&] {
ca_backward_kernel_f<scalar_t><<<blocks, threads, 0, stream>>>(
dw.contiguous().data_ptr<scalar_t>(),
t.contiguous().data_ptr<scalar_t>(),
f.contiguous().data_ptr<scalar_t>(),
df.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
THCudaCheck(cudaGetLastError());
}
void CAMapForwardCUDAKernelLauncher(const Tensor weight, const Tensor g,
Tensor out) {
AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");
auto n = g.size(0);
auto c = g.size(1);
auto h = g.size(2);
auto w = g.size(3);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = c;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_forward", [&] {
ca_map_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
out.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
THCudaCheck(cudaGetLastError());
}
void CAMapBackwardCUDAKernelLauncher(const Tensor dout, const Tensor weight,
const Tensor g, Tensor dw, Tensor dg) {
AT_ASSERTM(dout.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");
auto n = dout.size(0);
auto c = dout.size(1);
auto h = dout.size(2);
auto w = dout.size(3);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Run kernel
dim3 threads(32, 32);
int d1 = (w + threads.x - 1) / threads.x;
int d2 = (h + threads.y - 1) / threads.y;
int d3 = h + w;
dim3 blocks(d1, d2, d3);
AT_DISPATCH_FLOATING_TYPES(
weight.scalar_type(), "ca_map_backward_kernel_w", [&] {
ca_map_backward_kernel_w<scalar_t><<<blocks, threads, 0, stream>>>(
dout.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
dw.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_backward_kernel_g", [&] {
ca_map_backward_kernel_g<scalar_t><<<blocks, threads, 0, stream>>>(
dout.contiguous().data_ptr<scalar_t>(),
weight.contiguous().data_ptr<scalar_t>(),
g.contiguous().data_ptr<scalar_t>(),
dg.contiguous().data_ptr<scalar_t>(), n, c, h, w);
});
THCudaCheck(cudaGetLastError());
}
|
ef7db9f6439b958cf498c7fb52c6d120cee3c63c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
///////////////////////////////////////////////////////////////////////////////
// Update the heightmap from the FFT output ht, applying the (-1)^(x+y) sign correction
__global__ void updateHeightmapKernel(float* heightMap, float2* ht, unsigned int width){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int i = y*width+x;
float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f;
heightMap[i] = ht[i].x * sign_correction;
}
|
ef7db9f6439b958cf498c7fb52c6d120cee3c63c.cu
|
#include "includes.h"
///////////////////////////////////////////////////////////////////////////////
// Update the heightmap from the FFT output ht, applying the (-1)^(x+y) sign correction
__global__ void updateHeightmapKernel(float* heightMap, float2* ht, unsigned int width){
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int i = y*width+x;
float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f;
heightMap[i] = ht[i].x * sign_correction;
}
|
2b1eedd0a0c804ec5233968984aae73ad55be047.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void createKernels(
float* kernels,
int size,
int nrOfOrientations,
int nrOfScales,
float sigma_min,
int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int orientation = threadIdx.x;
int scale = blockIdx.x;
if (index < N)
{
int s2 = size / 2;
int nn = 11;
float gamma = 20;
// see: http://en.wikipedia.org/wiki/Gabor_filter
float alpha = (3.141592654 * orientation) / nrOfOrientations;
float sigma_adjusted = 0.6 * sigma_min * scale + sigma_min;
float s2d_min = 2 * sigma_adjusted * sigma_adjusted;
float s2d_max = 200;
float lambda = 5 * sigma_adjusted;
float totalSum = 0;
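// Each tap of the size x size kernel is supersampled on an nn x nn sub-grid;
// the sample point is rotated by alpha and accumulated through the Gabor
// response exp(-(x'^2/gamma + y'^2) / (2*sigma^2)) * cos(2*pi*y'/lambda).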
for (int j = -s2; j <= s2; j++) {
for (int i = -s2; i <= s2; i++) {
float sum = 0;
for (int ii = 0; ii < nn; ii++) {
float xx = i - 0.5 + (1 + 2 * ii) / (2.0 * nn);
for (int jj = 0; jj < nn; jj++)
{
float yy = j - 0.5 + (1 + 2 * jj) / (2.0 * nn);
float xx_ = yy * sinf(alpha) + xx * cosf(alpha);
float yy_ = yy * cosf(alpha) - xx * sinf(alpha);
//sum += expf(-(xx_ * xx_) / s2d_max - (yy_ * yy_) / s2d_min) * cosf(2 * 3.141592654 * yy_ / (size));
// Gabor filter
sum += expf(-(xx_ * xx_) / s2d_min / gamma - (yy_ * yy_) / s2d_min) * cosf(2 * 3.141592654 * yy_ / lambda);
}
}
kernels[(i + s2) + (j + s2) * size + size * size * orientation + size * size * nrOfOrientations * scale] = sum;
totalSum += sum;
}
}
for (int j = -s2; j <= s2; j++) {
for (int i = -s2; i <= s2; i++) {
//kernels[(i + s2) + (j + s2) * size + size * size * orientation + size * size * nrOfOrientations * scale] /= totalSum;
}
}
}
}
extern "C"
__global__ void applyKernels(
float* kernels,
float* inimg,
float* outimg,
int* positions,
int size,
int nrOfOrientations,
int scale,
int dimsx,
int dimsy,
int N_threads,
int NN)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int tid = index;
while (tid < NN)
{
int x = positions[2 * tid];
int y = positions[2 * tid + 1];
int s2 = size / 2;
int bestOrientation = 0;
float bestResponse = 0;
for (int r = 0; r < nrOfOrientations; r++) {
float sum = 0;
for (int j = -s2; j <= s2; j++) {
for (int i = -s2; i <= s2; i++) {
sum += inimg[x + i + dimsx * (y + j)] * kernels[i + s2 + size * (j + s2) + size * size * r + size * size * nrOfOrientations * scale];
}
}
if (sum > bestResponse) {
bestResponse = sum;
bestOrientation = r;
}
}
if (outimg[x + dimsx * y] < bestResponse)
{
outimg[x + dimsx * y] = bestResponse;
}
tid += N_threads;
}
}
|
2b1eedd0a0c804ec5233968984aae73ad55be047.cu
|
extern "C"
__global__ void createKernels(
float* kernels,
int size,
int nrOfOrientations,
int nrOfScales,
float sigma_min,
int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int orientation = threadIdx.x;
int scale = blockIdx.x;
if (index < N)
{
int s2 = size / 2;
int nn = 11;
float gamma = 20;
// see: http://en.wikipedia.org/wiki/Gabor_filter
float alpha = (3.141592654 * orientation) / nrOfOrientations;
float sigma_adjusted = 0.6 * sigma_min * scale + sigma_min;
float s2d_min = 2 * sigma_adjusted * sigma_adjusted;
float s2d_max = 200;
float lambda = 5 * sigma_adjusted;
float totalSum = 0;
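// Each tap of the size x size kernel is supersampled on an nn x nn sub-grid;
// the sample point is rotated by alpha and accumulated through the Gabor
// response exp(-(x'^2/gamma + y'^2) / (2*sigma^2)) * cos(2*pi*y'/lambda).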
for (int j = -s2; j <= s2; j++) {
for (int i = -s2; i <= s2; i++) {
float sum = 0;
for (int ii = 0; ii < nn; ii++) {
float xx = i - 0.5 + (1 + 2 * ii) / (2.0 * nn);
for (int jj = 0; jj < nn; jj++)
{
float yy = j - 0.5 + (1 + 2 * jj) / (2.0 * nn);
float xx_ = yy * sinf(alpha) + xx * cosf(alpha);
float yy_ = yy * cosf(alpha) - xx * sinf(alpha);
//sum += expf(-(xx_ * xx_) / s2d_max - (yy_ * yy_) / s2d_min) * cosf(2 * 3.141592654 * yy_ / (size));
// Gabor filter
sum += expf(-(xx_ * xx_) / s2d_min / gamma - (yy_ * yy_) / s2d_min) * cosf(2 * 3.141592654 * yy_ / lambda);
}
}
kernels[(i + s2) + (j + s2) * size + size * size * orientation + size * size * nrOfOrientations * scale] = sum;
totalSum += sum;
}
}
for (int j = -s2; j <= s2; j++) {
for (int i = -s2; i <= s2; i++) {
//kernels[(i + s2) + (j + s2) * size + size * size * orientation + size * size * nrOfOrientations * scale] /= totalSum;
}
}
}
}
extern "C"
__global__ void applyKernels(
float* kernels,
float* inimg,
float* outimg,
int* positions,
int size,
int nrOfOrientations,
int scale,
int dimsx,
int dimsy,
int N_threads,
int NN)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int tid = index;
while (tid < NN)
{
int x = positions[2 * tid];
int y = positions[2 * tid + 1];
int s2 = size / 2;
int bestOrientation = 0;
float bestResponse = 0;
for (int r = 0; r < nrOfOrientations; r++) {
float sum = 0;
for (int j = -s2; j <= s2; j++) {
for (int i = -s2; i <= s2; i++) {
sum += inimg[x + i + dimsx * (y + j)] * kernels[i + s2 + size * (j + s2) + size * size * r + size * size * nrOfOrientations * scale];
}
}
if (sum > bestResponse) {
bestResponse = sum;
bestOrientation = r;
}
}
if (outimg[x + dimsx * y] < bestResponse)
{
outimg[x + dimsx * y] = bestResponse;
}
tid += N_threads;
}
}
|
gpt2_self_softmax.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_fp16.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include "core/common.cuh"
template <typename T>
__global__
void softmax_kernel_gpt2(T *qk_buf,const int64_t* __restrict padding_index, const int head_num, const int seq_len, const T scalar)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
__shared__ float s_sum, s_max;
int left_padding_len = 0;
if (padding_index != nullptr){
left_padding_len = padding_index[batch_id];
}
// To avoid overflow
for (int i = 0; i < left_padding_len; i++)
{
if(threadIdx.x < seq_len)
qk_buf[threadIdx.x + qk_offset] = (T)0.0f;
qk_offset += seq_len;
}
for(int i = left_padding_len; i < seq_len; ++i)
{
float qk = threadIdx.x <= i ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f;
float left_padding_val = (threadIdx.x < left_padding_len)? -1e20f:0.0f;
float tmp = (threadIdx.x <= i ) ? (float)(qk * (float)scalar + left_padding_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x <= i ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf[threadIdx.x + qk_offset] = threadIdx.x <= i ? (T)(qk / s_sum) : (T)0.0f;
qk_offset += seq_len;
}
}
template <typename T,int item_per_thread>
__global__
void softmax_kernel_gpt2_opt(T *qk_buf,const int64_t* __restrict padding_index, const int head_num, const int seq_len, const T scalar)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int thread_id = threadIdx.x;
__shared__ float s_sum, s_max;
int left_padding_len = 0;
if (padding_index != nullptr){
left_padding_len = padding_index[batch_id];
}
// To avoid overflow
for (int i = 0; i < left_padding_len; i++)
{
for (int j = 0; j < item_per_thread; j++){
if(thread_id * item_per_thread + j < seq_len)
qk_buf[thread_id * item_per_thread + j + qk_offset] = (T)0.0f;
}
qk_offset += seq_len;
}
float qk[item_per_thread];
float left_padding_val[item_per_thread];
float tmp[item_per_thread];
for(int i = left_padding_len; i < seq_len; ++i)
{
for (int j = 0; j < item_per_thread; j++){
qk[j] =(thread_id * item_per_thread + j) <= i ? (float)qk_buf[thread_id * item_per_thread + j + qk_offset] : 0.0f;
left_padding_val[j] = ((thread_id * item_per_thread + j) < left_padding_len)? -1e20f:0.0f;
tmp[j] = ((thread_id * item_per_thread + j) <= i ) ? (float)(qk[j] * (float)scalar + left_padding_val[j]): -1e20f;
}
float max_val = blockReduceMax_opt<float,item_per_thread>(tmp);
if(thread_id == 0)
s_max = max_val;
__syncthreads();
for (int j = 0; j < item_per_thread; j++){
qk[j] = (thread_id * item_per_thread + j) <= i ? __expf(tmp[j] - s_max) : 0.0f;
}
float sum_val = blockReduceSum_opt<float,item_per_thread>(qk);
if(thread_id == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
for (int j = 0; j < item_per_thread; j++){
if((thread_id * item_per_thread + j) < seq_len)
qk_buf[thread_id * item_per_thread + j + qk_offset] = thread_id * item_per_thread + j <= i ? (T)(qk[j] / s_sum) : (T)0.0f;
}
qk_offset += seq_len;
}
}
template <class T>
void softmax_kernel(void *qk_buf_, const int64_t *__restrict padding_index, const int &batch_size,
const int &head_num, const int &seq_len, const float &scalar, const hipStream_t stream)
{
dim3 grid, block;
if(seq_len <= 1024)
{
if (seq_len <= 32)
block.x = 32;
else if (seq_len > 32 && seq_len <= 64)
block.x = 64;
else if (seq_len > 64 && seq_len <= 128)
block.x = 128;
else if (seq_len > 128 && seq_len <= 256)
block.x = 256;
else if (seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
grid.x = batch_size * head_num;
hipLaunchKernelGGL(( softmax_kernel_gpt2<T>), dim3(grid), dim3(block), 0, stream, (T *)qk_buf_, padding_index, head_num, seq_len, scalar);
}
else
{
grid.x = batch_size * head_num;
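// For longer sequences each thread handles item_per_thread consecutive
// columns, so block.x is rounded up to ceil(seq_len / (32 * item_per_thread))
// warps of 32 threads.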
if (seq_len <= 2048)
{
// block.x = seq_len/2;
block.x = ceil(seq_len / (32.0 * 2)) * 32; // item_per_thread = 2
hipLaunchKernelGGL(( softmax_kernel_gpt2_opt<T,2>), dim3(grid), dim3(block), 0, stream, (T *)qk_buf_, padding_index, head_num, seq_len, scalar);
}
else if (seq_len <= 4096)
{
block.x = ceil(seq_len / (32.0 * 4)) * 32; // item_per_thread = 4
hipLaunchKernelGGL(( softmax_kernel_gpt2_opt<T,4>), dim3(grid), dim3(block), 0, stream, (T *)qk_buf_, padding_index, head_num, seq_len, scalar);
}
else
{
std::cerr << "not support seq_len for softmax" << std::endl;
}
// else if (seq_len <= 8192)
// {
// block.x = ceil(seq_len / (32.0 * 8)) * 32; // item_per_thread = 1
// softmax_kernel_gpt2_opt<T,8><<<grid, block, 0, stream>>>((T *)qk_buf_, padding_index, head_num, seq_len, scalar);
// }
// else if (seq_len <= 16384)
// {
// block.x = ceil(seq_len / (32.0 * 16)) * 32; // item_per_thread = 1
// softmax_kernel_gpt2_opt<T,16><<<grid, block, 0, stream>>>((T *)qk_buf_, padding_index, head_num, seq_len, scalar);
// }
}
}
template void softmax_kernel<float>(void *qk_buf_, const int64_t* padding_index,const int& batch_size,
const int& head_num, const int& seq_len, const float& scalar, const hipStream_t stream);
template void softmax_kernel<half>(void *qk_buf_, const int64_t* padding_index, const int& batch_size,
const int& head_num, const int& seq_len, const float& scalar, const hipStream_t stream);
|
gpt2_self_softmax.cu
|
#include <cuda_fp16.h>
#include <iostream>
#include <cuda_runtime.h>
#include "core/common.cuh"
template <typename T>
__global__
void softmax_kernel_gpt2(T *qk_buf,const int64_t* __restrict padding_index, const int head_num, const int seq_len, const T scalar)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
__shared__ float s_sum, s_max;
int left_padding_len = 0;
if (padding_index != nullptr){
left_padding_len = padding_index[batch_id];
}
// To avoid overflow
for (int i = 0; i < left_padding_len; i++)
{
if(threadIdx.x < seq_len)
qk_buf[threadIdx.x + qk_offset] = (T)0.0f;
qk_offset += seq_len;
}
for(int i = left_padding_len; i < seq_len; ++i)
{
float qk = threadIdx.x <= i ? (float)qk_buf[threadIdx.x + qk_offset] : 0.0f;
float left_padding_val = (threadIdx.x < left_padding_len)? -1e20f:0.0f;
float tmp = (threadIdx.x <= i ) ? (float)(qk * (float)scalar + left_padding_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x <= i ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf[threadIdx.x + qk_offset] = threadIdx.x <= i ? (T)(qk / s_sum) : (T)0.0f;
qk_offset += seq_len;
}
}
template <typename T,int item_per_thread>
__global__
void softmax_kernel_gpt2_opt(T *qk_buf,const int64_t* __restrict padding_index, const int head_num, const int seq_len, const T scalar)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int thread_id = threadIdx.x;
__shared__ float s_sum, s_max;
int left_padding_len = 0;
if (padding_index != nullptr){
left_padding_len = padding_index[batch_id];
}
// To avoid overflow
for (int i = 0; i < left_padding_len; i++)
{
for (int j = 0; j < item_per_thread; j++){
if(thread_id * item_per_thread + j < seq_len)
qk_buf[thread_id * item_per_thread + j + qk_offset] = (T)0.0f;
}
qk_offset += seq_len;
}
float qk[item_per_thread];
float left_padding_val[item_per_thread];
float tmp[item_per_thread];
for(int i = left_padding_len; i < seq_len; ++i)
{
for (int j = 0; j < item_per_thread; j++){
qk[j] =(thread_id * item_per_thread + j) <= i ? (float)qk_buf[thread_id * item_per_thread + j + qk_offset] : 0.0f;
left_padding_val[j] = ((thread_id * item_per_thread + j) < left_padding_len)? -1e20f:0.0f;
tmp[j] = ((thread_id * item_per_thread + j) <= i ) ? (float)(qk[j] * (float)scalar + left_padding_val[j]): -1e20f;
}
float max_val = blockReduceMax_opt<float,item_per_thread>(tmp);
if(thread_id == 0)
s_max = max_val;
__syncthreads();
for (int j = 0; j < item_per_thread; j++){
qk[j] = (thread_id * item_per_thread + j) <= i ? __expf(tmp[j] - s_max) : 0.0f;
}
float sum_val = blockReduceSum_opt<float,item_per_thread>(qk);
if(thread_id == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
for (int j = 0; j < item_per_thread; j++){
if((thread_id * item_per_thread + j) < seq_len)
qk_buf[thread_id * item_per_thread + j + qk_offset] = thread_id * item_per_thread + j <= i ? (T)(qk[j] / s_sum) : (T)0.0f;
}
qk_offset += seq_len;
}
}
template <class T>
void softmax_kernel(void *qk_buf_, const int64_t *__restrict padding_index, const int &batch_size,
const int &head_num, const int &seq_len, const float &scalar, const cudaStream_t stream)
{
dim3 grid, block;
if(seq_len <= 1024)
{
if (seq_len <= 32)
block.x = 32;
else if (seq_len > 32 && seq_len <= 64)
block.x = 64;
else if (seq_len > 64 && seq_len <= 128)
block.x = 128;
else if (seq_len > 128 && seq_len <= 256)
block.x = 256;
else if (seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
grid.x = batch_size * head_num;
softmax_kernel_gpt2<T><<<grid, block, 0, stream>>>((T *)qk_buf_, padding_index, head_num, seq_len, scalar);
}
else
{
grid.x = batch_size * head_num;
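// For longer sequences each thread handles item_per_thread consecutive
// columns, so block.x is rounded up to ceil(seq_len / (32 * item_per_thread))
// warps of 32 threads.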
if (seq_len <= 2048)
{
// block.x = seq_len/2;
block.x = ceil(seq_len / (32.0 * 2)) * 32; // item_per_thread = 2
softmax_kernel_gpt2_opt<T,2><<<grid, block, 0, stream>>>((T *)qk_buf_, padding_index, head_num, seq_len, scalar);
}
else if (seq_len <= 4096)
{
block.x = ceil(seq_len / (32.0 * 4)) * 32; // item_per_thread = 4
softmax_kernel_gpt2_opt<T,4><<<grid, block, 0, stream>>>((T *)qk_buf_, padding_index, head_num, seq_len, scalar);
}
else
{
std::cerr << "not support seq_len for softmax" << std::endl;
}
// else if (seq_len <= 8192)
// {
// block.x = ceil(seq_len / (32.0 * 8)) * 32; // item_per_thread = 1
// softmax_kernel_gpt2_opt<T,8><<<grid, block, 0, stream>>>((T *)qk_buf_, padding_index, head_num, seq_len, scalar);
// }
// else if (seq_len <= 16384)
// {
// block.x = ceil(seq_len / (32.0 * 16)) * 32; // item_per_thread = 1
// softmax_kernel_gpt2_opt<T,16><<<grid, block, 0, stream>>>((T *)qk_buf_, padding_index, head_num, seq_len, scalar);
// }
}
}
template void softmax_kernel<float>(void *qk_buf_, const int64_t* padding_index,const int& batch_size,
const int& head_num, const int& seq_len, const float& scalar, const cudaStream_t stream);
template void softmax_kernel<half>(void *qk_buf_, const int64_t* padding_index, const int& batch_size,
const int& head_num, const int& seq_len, const float& scalar, const cudaStream_t stream);
|
ffc1a16408e63716f8b4f0cccadc5ef4f93820c2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__intToFloat.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
__intToFloat), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
__intToFloat), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
__intToFloat), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ffc1a16408e63716f8b4f0cccadc5ef4f93820c2.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__intToFloat.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
__intToFloat<<<gridBlock,threadBlock>>>(A,B,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
__intToFloat<<<gridBlock,threadBlock>>>(A,B,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
__intToFloat<<<gridBlock,threadBlock>>>(A,B,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
59bbe22c8e129ce245bc9cb66fe265b3cab6b699.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void mult1(int *A, int *B, int *C, int n){ //each thread computes the product of elements row-wise
int row = threadIdx.x;
for(int i=0;i<n;i++){
C[row*n+i] = A[row*n +i] * B[row*n+i];
}
}
__global__ void mult2(int *A, int *B, int *C, int m, int n){ //each thread computes the product of elements column-wise
int col = threadIdx.x;
for(int i=0;i<m;i++){
C[i*n+col] = A[i*n+col] * B[i*n+col]; //row-major index of row i, column col
}
}
__global__ void mult3(int *A, int *B, int *C){ //each thread computes product of 2 elements
int ele = threadIdx.x, row=blockIdx.x, no_eles = blockDim.x;
C[row*no_eles + ele] = A[row*no_eles + ele] * B[row*no_eles + ele];
}
int main(){
int *a, *b, *t, m, n;
int *d_a, *d_b, *d_t;
printf("Enter the value of m: "); scanf("%d",&m);
printf("Enter the value of n: "); scanf("%d",&n);
int size = sizeof(int)*m*n;
a=(int*)malloc(size);
b=(int*)malloc(size);
t=(int*)malloc(size);
printf("Enter input matrix A: \n");
for(int i=0; i<m*n; i++)
scanf("%d",&a[i]);
printf("Enter input matrix B: \n");
for(int i=0; i<m*n; i++)
scanf("%d",&b[i]);
hipMalloc((void**)&d_a,size);
hipMalloc((void**)&d_b,size);
hipMalloc((void**)&d_t,size);
hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mult1), dim3(1),dim3(m), 0, 0, d_a,d_b,d_t,n);
hipMemcpy(t,d_t,size,hipMemcpyDeviceToHost);
printf("Resultant matrix ADD3:\n");
for(int i=0; i<m; i++){
for(int j=0; j<n; j++){
printf("%d ",t[i*n+j]);
}
printf("\n");
}
hipLaunchKernelGGL(( mult2), dim3(1),dim3(n), 0, 0, d_a,d_b,d_t,m,n);
hipMemcpy(t,d_t,size,hipMemcpyDeviceToHost);
printf("Resultant matrix ADD3:\n");
for(int i=0; i<m; i++){
for(int j=0; j<n; j++){
printf("%d ",t[i*n+j]);
}
printf("\n");
}
hipLaunchKernelGGL(( mult3), dim3(m),dim3(n), 0, 0, d_a,d_b,d_t);
hipMemcpy(t,d_t,size,hipMemcpyDeviceToHost);
printf("Resultant matrix ADD3:\n");
for(int i=0; i<m; i++){
for(int j=0; j<n; j++){
printf("%d ",t[i*n+j]);
}
printf("\n");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_t);
return 0;
}
|
59bbe22c8e129ce245bc9cb66fe265b3cab6b699.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void mult1(int *A, int *B, int *C, int n){ //each thread computes the product of elements row-wise
int row = threadIdx.x;
for(int i=0;i<n;i++){
C[row*n+i] = A[row*n +i] * B[row*n+i];
}
}
__global__ void mult2(int *A, int *B, int *C, int m, int n){ //each thread computes the product of elements column-wise
int col = threadIdx.x;
for(int i=0;i<m;i++){
C[i*n+col] = A[i*n+col] * B[i*n+col]; //row-major index of row i, column col
}
}
__global__ void mult3(int *A, int *B, int *C){ //each thread computes product of 2 elements
int ele = threadIdx.x, row=blockIdx.x, no_eles = blockDim.x;
C[row*no_eles + ele] = A[row*no_eles + ele] * B[row*no_eles + ele];
}
int main(){
int *a, *b, *t, m, n;
int *d_a, *d_b, *d_t;
printf("Enter the value of m: "); scanf("%d",&m);
printf("Enter the value of n: "); scanf("%d",&n);
int size = sizeof(int)*m*n;
a=(int*)malloc(size);
b=(int*)malloc(size);
t=(int*)malloc(size);
printf("Enter input matrix A: \n");
for(int i=0; i<m*n; i++)
scanf("%d",&a[i]);
printf("Enter input matrix B: \n");
for(int i=0; i<m*n; i++)
scanf("%d",&b[i]);
cudaMalloc((void**)&d_a,size);
cudaMalloc((void**)&d_b,size);
cudaMalloc((void**)&d_t,size);
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
mult1<<<1,m>>>(d_a,d_b,d_t,n);
cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
printf("Resultant matrix ADD3:\n");
for(int i=0; i<m; i++){
for(int j=0; j<n; j++){
printf("%d ",t[i*n+j]);
}
printf("\n");
}
mult2<<<1,n>>>(d_a,d_b,d_t,m,n);
cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
printf("Resultant matrix ADD3:\n");
for(int i=0; i<m; i++){
for(int j=0; j<n; j++){
printf("%d ",t[i*n+j]);
}
printf("\n");
}
mult3<<<m,n>>>(d_a,d_b,d_t);
cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
printf("Resultant matrix ADD3:\n");
for(int i=0; i<m; i++){
for(int j=0; j<n; j++){
printf("%d ",t[i*n+j]);
}
printf("\n");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_t);
return 0;
}
|
2688b3f2e2c250a9450f0081ca2d3b3faa6c8252.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "Utils.h"
#include "BlockUtils.h"
#include "Rando.h"
#include "Ply.h"
#include "RandPly.h"
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) {printf("Error at %s:%d\n",__FILE__,__LINE__); return EXIT_FAILURE;}} while(0)
////////////////////////////////////////////////////////////////////////////////
// randPly_local_update
// A 1d texture, working on a 2d torus, using 1d blocks and threads
////////////////////////////////////////////////////////////////////////////////
__global__ void randPly_local_update(float *resOut, float *plyIn, float *rands, int2 plySize,
float speed, float noise) {
int plyLength = plySize.x * plySize.y;
int step = blockDim.x * gridDim.x;
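// Grid-stride loop over the flattened ply; neighbour indices wrap modulo the
// ply dimensions so the update acts on a 2d torus.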
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < plyLength; i += step)
{
int col = i % plySize.x;
int row = (i - col) / plySize.x;
int colm = (col - 1 + plySize.x) % plySize.x;
int colp = (col + 1 + plySize.x) % plySize.x;
int rowm = (row - 1 + plySize.y) % plySize.y;
int rowp = (row + 1 + plySize.y) % plySize.y;
int left = colm + row * plySize.x;
int right = colp + row * plySize.x;
int top = col + rowp * plySize.x;
int bottom = col + rowm * plySize.x;
int center = col + row * plySize.x;
float t = plyIn[top];
float l = plyIn[left];
float c = plyIn[center];
float r = plyIn[right];
float b = plyIn[bottom];
float res = c + speed * (t + b + r + l) + noise * rands[i];
if (res > 1.0) {
res = 1.0;
}
if (res < -1.0) {
res = -1.0;
}
resOut[center] = res;
}
}
////////////////////////////////////////////////////////////////////////////////
//! MakeRandPly
////////////////////////////////////////////////////////////////////////////////
void MakeRandPly(RandPly **randPly, float *data, int seed, unsigned int plyLength, unsigned int span)
{
RandPly *rp = (RandPly *)malloc(sizeof(RandPly));
*randPly = rp;
MakePly(&(rp->ply), data, plyLength, span);
float *dev_rands;
int dataSize = plyLength * sizeof(float);
checkCudaErrors(hipMalloc((void **)&dev_rands, dataSize));
InitRandData(&(rp->randData), seed, plyLength, dev_rands);
}
////////////////////////////////////////////////////////////////////////////////
//! RunRandPlyLocalUpdate
////////////////////////////////////////////////////////////////////////////////
void RunRandPlyLocalUpdate(RandPly *randPly, int num_steps, float speed, float noise)
{
int blocks = SuggestedBlocks((randPly->ply->area + THREADS_1D - 1) / THREADS_1D);
int2 plySize;
plySize.x = randPly->ply->span; plySize.y = randPly->ply->span;
for (int step = 0; step < num_steps; step++)
{
UpdateRandData(randPly->randData);
if (randPly->ply->inToOut)
{
randPly_local_update << <blocks, THREADS_1D >> >(
randPly->ply->dev_outSrc,
randPly->ply->dev_inSrc,
randPly->randData->dev_rands,
plySize,
speed,
noise);
}
else
{
randPly_local_update << <blocks, THREADS_1D >> >(
randPly->ply->dev_inSrc,
randPly->ply->dev_outSrc,
randPly->randData->dev_rands,
plySize,
speed,
noise);
}
randPly->ply->inToOut = !randPly->ply->inToOut;
}
}
////////////////////////////////////////////////////////////////////////////////
//! GridEnergy
////////////////////////////////////////////////////////////////////////////////
float *GridEnergy(float *d_ply, int span, int dataLen)
{
int blocks = SuggestedBlocks((dataLen + THREADS_1D - 1) / THREADS_1D);
int plyMemSize = span * span * sizeof(float);
int2 plySize;
plySize.x = span; plySize.y = span;
float *d_energies;
checkCudaErrors(hipMalloc((void**)&d_energies, plyMemSize));
grid_local_energy << <blocks, THREADS_1D >> >(
d_energies,
d_ply,
plySize);
float *h_energies = (float *)malloc(plyMemSize);
checkCudaErrors(hipMemcpy(h_energies, d_energies, plyMemSize, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_energies));
return h_energies;
}
////////////////////////////////////////////////////////////////////////////////
//! GridEnergyC
////////////////////////////////////////////////////////////////////////////////
float *GridEnergyC(float *d_ply, int span, int dataLen)
{
int blocks = SuggestedBlocks((dataLen + THREADS_1D - 1) / THREADS_1D);
int blockEnergySize = blocks * blocks * sizeof(float);
int2 plySize;
plySize.x = span; plySize.y = span;
float *d_energies;
checkCudaErrors(hipMalloc((void**)&d_energies, blockEnergySize));
grid_local_energyC << <blocks, THREADS_1D >> >(
d_energies,
d_ply,
plySize);
float *h_energies = (float *)malloc(blockEnergySize);
checkCudaErrors(hipMemcpy(h_energies, d_energies, blockEnergySize, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_energies));
return h_energies;
}
////////////////////////////////////////////////////////////////////////////////
//! PlyEnergy
////////////////////////////////////////////////////////////////////////////////
float *PlyEnergy(Ply *ply)
{
if (ply->inToOut)
{
return GridEnergy(ply->dev_inSrc, ply->span, ply->area);
}
else
{
return GridEnergy(ply->dev_outSrc, ply->span, ply->area);
}
}
////////////////////////////////////////////////////////////////////////////////
//! PlyEnergyC
////////////////////////////////////////////////////////////////////////////////
float *PlyEnergyC(Ply *ply)
{
if (ply->inToOut)
{
return GridEnergyC(ply->dev_inSrc, ply->span, ply->area);
}
else
{
return GridEnergyC(ply->dev_outSrc, ply->span, ply->area);
}
}
////////////////////////////////////////////////////////////////////////////////
//! DeleteRandPly
////////////////////////////////////////////////////////////////////////////////
void DeleteRandPly(RandPly *randPly)
{
FreePly(randPly->ply);
DeleteRandData(randPly->randData);
}
////////////////////////////////////////////////////////////////////////////////
//! RunEnergyTest
////////////////////////////////////////////////////////////////////////////////
void RunEnergyTest(int argc, char **argv)
{
//dataLen=400 span=20 reps=50 seed=1243 speed=0.05 noise=0.01 batch=10
int dataLen = IntNamed(argc, argv, "dataLen", 81);
int span = IntNamed(argc, argv, "span", 9);
float *d_pattern;
float *h_pattern = CheckerArray(span);
PrintFloatArray(h_pattern, span, span*span);
int plyMemSize = span * span * sizeof(float);
checkCudaErrors(hipMalloc((void**)&d_pattern, plyMemSize));
checkCudaErrors(hipMemcpy(d_pattern, h_pattern, plyMemSize, hipMemcpyHostToDevice));
float *h_energies = GridEnergy(d_pattern, span, dataLen);
printf("Energies:\n");
PrintFloatArray(h_energies, span, span*span);
}
////////////////////////////////////////////////////////////////////////////////
//! RunEnergyTestC
////////////////////////////////////////////////////////////////////////////////
void RunEnergyTestC(int argc, char **argv)
{
//dataLen=400 span=20 reps=50 seed=1243 speed=0.05 noise=0.01 batch=10
int dataLen = IntNamed(argc, argv, "dataLen", 81);
int span = IntNamed(argc, argv, "span", 9);
int blocks = SuggestedBlocks((dataLen + THREADS_1D - 1) / THREADS_1D);
float *d_pattern;
float *h_pattern = CheckerArray(span);
PrintFloatArray(h_pattern, span, span*span);
int plyMemSize = span * span * sizeof(float);
checkCudaErrors(hipMalloc((void**)&d_pattern, plyMemSize));
checkCudaErrors(hipMemcpy(d_pattern, h_pattern, plyMemSize, hipMemcpyHostToDevice));
float *h_energies = GridEnergyC(d_pattern, span, dataLen);
//float *h_energies = (float *)malloc(plyMemSize);
//checkCudaErrors(hipMemcpy(h_energies, d_pattern, plyMemSize, hipMemcpyDeviceToHost));
printf("Energies:\n");
PrintFloatArray(h_energies, 1, blocks);
}
float *RunRandPly(int span, float speed, float noise, int seed, float *h_data, int reps)
{
RandPly *randPly;
MakeRandPly(&(randPly), h_data, seed, span*span, span);
RunRandPlyLocalUpdate(randPly, reps, speed, noise);
float *h_results = (float *)malloc(span * span * sizeof(float));
GetPlyData(randPly->ply, h_results);
DeleteRandPly(randPly);
return h_results;
}
////////////////////////////////////////////////////////////////////////////////
//! RunRandPlyBench
////////////////////////////////////////////////////////////////////////////////
void RunRandPlyBench(int argc, char **argv)
{
//dataLen=1048576 span=1024 reps=100 seed=6423 speed=0.1 noise=0.01 batch=1
//dataLen=400 span=20 reps=50 seed=1243 speed=0.05 noise=0.01 batch=10
int dataLen = IntNamed(argc, argv, "dataLen", 36);
int span = IntNamed(argc, argv, "span", 6);
int seed = IntNamed(argc, argv, "seed", 12);
int reps = IntNamed(argc, argv, "reps", 16);
float speed = FloatNamed(argc, argv, "speed", 0.05);
float noise = FloatNamed(argc, argv, "noise", 0.01);
int batch = IntNamed(argc, argv, "batch", 10);
printf("dataLen: %d batch: %d speed: %3.4f noise: %3.4f \n", dataLen, batch, speed, noise);
float *h_samples;
int dataSize = dataLen * sizeof(float);
h_samples = CheckerArray(span);
RandPly *randPly;
MakeRandPly(&(randPly), h_samples, seed, dataLen, span);
hipEvent_t start, stop;
float elapsedTime;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start, 0));
for (int i = 0; i < reps; i++) {
RunRandPlyLocalUpdate(randPly, batch, speed, noise);
float *out = PlyEnergy(randPly->ply);
printf("Energy: %3.4f\n", FloatArraySum(out, dataLen));
}
checkCudaErrors(hipEventRecord(stop, 0));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("Time: %3.1f ms\n", elapsedTime);
GetPlyData(randPly->ply, h_samples);
//PrintFloatArray(h_samples, span, dataLen);
DeleteRandPly(randPly);
}
|
2688b3f2e2c250a9450f0081ca2d3b3faa6c8252.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <curand.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "Utils.h"
#include "BlockUtils.h"
#include "Rando.h"
#include "Ply.h"
#include "RandPly.h"
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) {printf("Error at %s:%d\n",__FILE__,__LINE__); return EXIT_FAILURE;}} while(0)
////////////////////////////////////////////////////////////////////////////////
// randPly_local_update
// A 1d texture, working on a 2d torus, using 1d blocks and threads
////////////////////////////////////////////////////////////////////////////////
__global__ void randPly_local_update(float *resOut, float *plyIn, float *rands, int2 plySize,
float speed, float noise) {
int plyLength = plySize.x * plySize.y;
int step = blockDim.x * gridDim.x;
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < plyLength; i += step)
{
int col = i % plySize.x;
int row = (i - col) / plySize.x;
int colm = (col - 1 + plySize.x) % plySize.x;
int colp = (col + 1 + plySize.x) % plySize.x;
int rowm = (row - 1 + plySize.y) % plySize.y;
int rowp = (row + 1 + plySize.y) % plySize.y;
int left = colm + row * plySize.x;
int right = colp + row * plySize.x;
int top = col + rowp * plySize.x;
int bottom = col + rowm * plySize.x;
int center = col + row * plySize.x;
float t = plyIn[top];
float l = plyIn[left];
float c = plyIn[center];
float r = plyIn[right];
float b = plyIn[bottom];
float res = c + speed * (t + b + r + l) + noise * rands[i];
if (res > 1.0) {
res = 1.0;
}
if (res < -1.0) {
res = -1.0;
}
resOut[center] = res;
}
}
////////////////////////////////////////////////////////////////////////////////
//! MakeRandPly
////////////////////////////////////////////////////////////////////////////////
void MakeRandPly(RandPly **randPly, float *data, int seed, unsigned int plyLength, unsigned int span)
{
RandPly *rp = (RandPly *)malloc(sizeof(RandPly));
*randPly = rp;
MakePly(&(rp->ply), data, plyLength, span);
float *dev_rands;
int dataSize = plyLength * sizeof(float);
checkCudaErrors(cudaMalloc((void **)&dev_rands, dataSize));
InitRandData(&(rp->randData), seed, plyLength, dev_rands);
}
////////////////////////////////////////////////////////////////////////////////
//! RunRandPlyLocalUpdate
////////////////////////////////////////////////////////////////////////////////
void RunRandPlyLocalUpdate(RandPly *randPly, int num_steps, float speed, float noise)
{
int blocks = SuggestedBlocks((randPly->ply->area + THREADS_1D - 1) / THREADS_1D);
int2 plySize;
plySize.x = randPly->ply->span; plySize.y = randPly->ply->span;
for (int step = 0; step < num_steps; step++)
{
UpdateRandData(randPly->randData);
if (randPly->ply->inToOut)
{
randPly_local_update << <blocks, THREADS_1D >> >(
randPly->ply->dev_outSrc,
randPly->ply->dev_inSrc,
randPly->randData->dev_rands,
plySize,
speed,
noise);
}
else
{
randPly_local_update << <blocks, THREADS_1D >> >(
randPly->ply->dev_inSrc,
randPly->ply->dev_outSrc,
randPly->randData->dev_rands,
plySize,
speed,
noise);
}
randPly->ply->inToOut = !randPly->ply->inToOut;
}
}
////////////////////////////////////////////////////////////////////////////////
//! GridEnergy
////////////////////////////////////////////////////////////////////////////////
float *GridEnergy(float *d_ply, int span, int dataLen)
{
int blocks = SuggestedBlocks((dataLen + THREADS_1D - 1) / THREADS_1D);
int plyMemSize = span * span * sizeof(float);
int2 plySize;
plySize.x = span; plySize.y = span;
float *d_energies;
checkCudaErrors(cudaMalloc((void**)&d_energies, plyMemSize));
grid_local_energy << <blocks, THREADS_1D >> >(
d_energies,
d_ply,
plySize);
float *h_energies = (float *)malloc(plyMemSize);
checkCudaErrors(cudaMemcpy(h_energies, d_energies, plyMemSize, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_energies));
return h_energies;
}
////////////////////////////////////////////////////////////////////////////////
//! GridEnergyC
////////////////////////////////////////////////////////////////////////////////
float *GridEnergyC(float *d_ply, int span, int dataLen)
{
int blocks = SuggestedBlocks((dataLen + THREADS_1D - 1) / THREADS_1D);
int blockEnergySize = blocks * blocks * sizeof(float);
int2 plySize;
plySize.x = span; plySize.y = span;
float *d_energies;
checkCudaErrors(cudaMalloc((void**)&d_energies, blockEnergySize));
grid_local_energyC << <blocks, THREADS_1D >> >(
d_energies,
d_ply,
plySize);
float *h_energies = (float *)malloc(blockEnergySize);
checkCudaErrors(cudaMemcpy(h_energies, d_energies, blockEnergySize, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_energies));
return h_energies;
}
////////////////////////////////////////////////////////////////////////////////
//! PlyEnergy
////////////////////////////////////////////////////////////////////////////////
float *PlyEnergy(Ply *ply)
{
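// The ply ping-pongs between dev_inSrc and dev_outSrc; when inToOut is set the
// next update reads from dev_inSrc, so that buffer holds the most recent state.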
if (ply->inToOut)
{
return GridEnergy(ply->dev_inSrc, ply->span, ply->area);
}
else
{
return GridEnergy(ply->dev_outSrc, ply->span, ply->area);
}
}
////////////////////////////////////////////////////////////////////////////////
//! PlyEnergyC
////////////////////////////////////////////////////////////////////////////////
float *PlyEnergyC(Ply *ply)
{
if (ply->inToOut)
{
return GridEnergyC(ply->dev_inSrc, ply->span, ply->area);
}
else
{
return GridEnergyC(ply->dev_outSrc, ply->span, ply->area);
}
}
////////////////////////////////////////////////////////////////////////////////
//! DeleteRandPly
////////////////////////////////////////////////////////////////////////////////
void DeleteRandPly(RandPly *randPly)
{
FreePly(randPly->ply);
DeleteRandData(randPly->randData);
}
////////////////////////////////////////////////////////////////////////////////
//! RunEnergyTest
////////////////////////////////////////////////////////////////////////////////
void RunEnergyTest(int argc, char **argv)
{
//dataLen=400 span=20 reps=50 seed=1243 speed=0.05 noise=0.01 batch=10
int dataLen = IntNamed(argc, argv, "dataLen", 81);
int span = IntNamed(argc, argv, "span", 9);
float *d_pattern;
float *h_pattern = CheckerArray(span);
PrintFloatArray(h_pattern, span, span*span);
int plyMemSize = span * span * sizeof(float);
checkCudaErrors(cudaMalloc((void**)&d_pattern, plyMemSize));
checkCudaErrors(cudaMemcpy(d_pattern, h_pattern, plyMemSize, cudaMemcpyHostToDevice));
float *h_energies = GridEnergy(d_pattern, span, dataLen);
printf("Energies:\n");
PrintFloatArray(h_energies, span, span*span);
}
////////////////////////////////////////////////////////////////////////////////
//! RunEnergyTestC
////////////////////////////////////////////////////////////////////////////////
void RunEnergyTestC(int argc, char **argv)
{
//dataLen=400 span=20 reps=50 seed=1243 speed=0.05 noise=0.01 batch=10
int dataLen = IntNamed(argc, argv, "dataLen", 81);
int span = IntNamed(argc, argv, "span", 9);
int blocks = SuggestedBlocks((dataLen + THREADS_1D - 1) / THREADS_1D);
float *d_pattern;
float *h_pattern = CheckerArray(span);
PrintFloatArray(h_pattern, span, span*span);
int plyMemSize = span * span * sizeof(float);
checkCudaErrors(cudaMalloc((void**)&d_pattern, plyMemSize));
checkCudaErrors(cudaMemcpy(d_pattern, h_pattern, plyMemSize, cudaMemcpyHostToDevice));
float *h_energies = GridEnergyC(d_pattern, span, dataLen);
//float *h_energies = (float *)malloc(plyMemSize);
//checkCudaErrors(cudaMemcpy(h_energies, d_pattern, plyMemSize, cudaMemcpyDeviceToHost));
printf("Energies:\n");
PrintFloatArray(h_energies, 1, blocks);
}
float *RunRandPly(int span, float speed, float noise, int seed, float *h_data, int reps)
{
RandPly *randPly;
MakeRandPly(&(randPly), h_data, seed, span*span, span);
RunRandPlyLocalUpdate(randPly, reps, speed, noise);
float *h_results = (float *)malloc(span * span * sizeof(float));
GetPlyData(randPly->ply, h_results);
DeleteRandPly(randPly);
return h_results;
}
////////////////////////////////////////////////////////////////////////////////
//! RunRandPlyBench
////////////////////////////////////////////////////////////////////////////////
void RunRandPlyBench(int argc, char **argv)
{
//dataLen=1048576 span=1024 reps=100 seed=6423 speed=0.1 noise=0.01 batch=1
//dataLen=400 span=20 reps=50 seed=1243 speed=0.05 noise=0.01 batch=10
int dataLen = IntNamed(argc, argv, "dataLen", 36);
int span = IntNamed(argc, argv, "span", 6);
int seed = IntNamed(argc, argv, "seed", 12);
int reps = IntNamed(argc, argv, "reps", 16);
float speed = FloatNamed(argc, argv, "speed", 0.05);
float noise = FloatNamed(argc, argv, "noise", 0.01);
int batch = IntNamed(argc, argv, "batch", 10);
printf("dataLen: %d batch: %d speed: %3.4f noise: %3.4f \n", dataLen, batch, speed, noise);
float *h_samples;
int dataSize = dataLen * sizeof(float);
h_samples = CheckerArray(span);
RandPly *randPly;
MakeRandPly(&(randPly), h_samples, seed, dataLen, span);
cudaEvent_t start, stop;
float elapsedTime;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start, 0));
for (int i = 0; i < reps; i++) {
RunRandPlyLocalUpdate(randPly, batch, speed, noise);
float *out = PlyEnergy(randPly->ply);
printf("Energy: %3.4f\n", FloatArraySum(out, dataLen));
}
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("Time: %3.1f ms\n", elapsedTime);
GetPlyData(randPly->ply, h_samples);
//PrintFloatArray(h_samples, span, dataLen);
DeleteRandPly(randPly);
}
|
1f568181605bbf1104c13080c1a1c637aaf40821.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
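// Tree-based parallel sum reduction (interleaved addressing): each block loads
// blockDim.x elements into dynamic shared memory (blockDim.x * sizeof(int) bytes
// must be passed at launch) and writes its partial sum to g_odata[blockIdx.x].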
__global__ void reduce(int *g_idata, int *g_odata)
{
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2)
{
if (tid % (2 * s) == 0)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0)
{
g_odata[blockIdx.x] = sdata[0];
}
}
int main(int argc, char *argv[])
{
// We assume that the element number is a power of 2 for simplicity.
const int elemNum = 1 << 22;
int arraySize = elemNum * sizeof(int);
// host memory
int *h_idata;
int sum;
// device memory
int *d_idata;
int *d_odata;
// initialize input data
h_idata = (int *) malloc(arraySize);
FILE *fp;
if((fp = fopen(argv[1], "rb")) == NULL)
{
printf("Can not open input file!\n");
exit(0);
}
for (int i = 0; i < elemNum; ++i)
{
fscanf(fp, "%d", &h_idata[i]);
}
fclose(fp);
// copy input data from CPU to GPU
hipMalloc((void **) &d_idata, arraySize);
hipMemcpy(d_idata, h_idata, arraySize, hipMemcpyHostToDevice);
int threadNum = 0;
int blockNum = 0;
// calculate the threadNum and blockNum for the first kernel
hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, 0);
int maxThreadsPerBlock = deviceProperties.maxThreadsPerBlock; // maxThreadsPerBlock = 1024 on K20X
threadNum = (elemNum > maxThreadsPerBlock)? maxThreadsPerBlock: elemNum;
blockNum = (int) ceil((double) elemNum / threadNum); // blockNum = 4096
// the number of output elements of the first kernel is blockNum
hipMalloc((void **) &d_odata, blockNum * sizeof(int));
// use GPU of id=0
hipSetDevice(0);
// parameters for the first kernel
dim3 gridDim(blockNum, 1, 1);
dim3 blockDim(threadNum, 1, 1);
int sMemSize = threadNum * sizeof(int);
hipEvent_t start, stop;
float stepTime;
float totalTime = 0;
// create event for recording GPU execution time
hipEventCreate(&start);
hipEventCreate(&stop);
// execute the first kernel and set the GPU timer
hipEventRecord(start, 0);
hipLaunchKernelGGL(( reduce), dim3(gridDim), dim3(blockDim), sMemSize, 0, d_idata, d_odata);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// calculate the execution time of the first kernel
hipEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
hipEventDestroy(start);
hipEventDestroy(stop);
// calculate the threadNum and blockNum for the next kernel
threadNum = (blockNum > maxThreadsPerBlock)? maxThreadsPerBlock: blockNum;
blockNum = (int) ceil((double) blockNum / threadNum);
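// Repeatedly reduce the partial sums in place (d_odata -> d_odata) until a single value remains.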
while(blockNum >= 1) {
// parameters for the current kernel
dim3 gridDim(blockNum, 1, 1);
dim3 blockDim(threadNum, 1, 1);
sMemSize = threadNum * sizeof(int);
// create event for recording GPU execution time
hipEventCreate(&start);
hipEventCreate(&stop);
// execute the current kernel and set the GPU timer
hipEventRecord(start, 0);
hipLaunchKernelGGL(( reduce), dim3(gridDim), dim3(blockDim), sMemSize, 0, d_odata, d_odata);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// calculate the execution time of the current kernel
hipEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
hipEventDestroy(start);
hipEventDestroy(stop);
if (blockNum == 1) break;
// calculate the threadNum and blockNum for the next kernel
threadNum = (blockNum > maxThreadsPerBlock)? maxThreadsPerBlock: blockNum;
blockNum = (int) ceil((double) blockNum / threadNum);
}
// copy result back to CPU
hipMemcpy(&sum, d_odata, sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_idata);
hipFree(d_odata);
free(h_idata);
float bandwidth = elemNum * sizeof(int) / (totalTime / 1000) / 1024 / 1024 / 1024;
printf("%d %fms %fGB/s\n", sum, totalTime, bandwidth);
return 0;
}
|
1f568181605bbf1104c13080c1a1c637aaf40821.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
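// Tree-based parallel sum reduction (interleaved addressing): each block loads
// blockDim.x elements into dynamic shared memory (blockDim.x * sizeof(int) bytes
// must be passed at launch) and writes its partial sum to g_odata[blockIdx.x].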
__global__ void reduce(int *g_idata, int *g_odata)
{
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2)
{
if (tid % (2 * s) == 0)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0)
{
g_odata[blockIdx.x] = sdata[0];
}
}
int main(int argc, char *argv[])
{
// We assume that the element number is a power of 2 for simplicity.
const int elemNum = 1 << 22;
int arraySize = elemNum * sizeof(int);
// host memory
int *h_idata;
int sum;
// device memory
int *d_idata;
int *d_odata;
// initialize input data
h_idata = (int *) malloc(arraySize);
FILE *fp;
if((fp = fopen(argv[1], "rb")) == NULL)
{
printf("Can not open input file!\n");
exit(0);
}
for (int i = 0; i < elemNum; ++i)
{
fscanf(fp, "%d", &h_idata[i]);
}
fclose(fp);
// copy input data from CPU to GPU
cudaMalloc((void **) &d_idata, arraySize);
cudaMemcpy(d_idata, h_idata, arraySize, cudaMemcpyHostToDevice);
int threadNum = 0;
int blockNum = 0;
// calculate the threadNum and blockNum for the first kernel
cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, 0);
int maxThreadsPerBlock = deviceProperties.maxThreadsPerBlock; // maxThreadsPerBlock = 1024 on K20X
threadNum = (elemNum > maxThreadsPerBlock)? maxThreadsPerBlock: elemNum;
blockNum = (int) ceil((double) elemNum / threadNum); // blockNum = 4096
// the number of output elements of the first kernel is blockNum
cudaMalloc((void **) &d_odata, blockNum * sizeof(int));
// use GPU of id=0
cudaSetDevice(0);
// parameters for the first kernel
dim3 gridDim(blockNum, 1, 1);
dim3 blockDim(threadNum, 1, 1);
int sMemSize = threadNum * sizeof(int);
cudaEvent_t start, stop;
float stepTime;
float totalTime = 0;
// create event for recording GPU execution time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// execute the first kernel and set the GPU timer
cudaEventRecord(start, 0);
reduce<<<gridDim, blockDim, sMemSize>>>(d_idata, d_odata);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculate the execution time of the first kernel
cudaEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
cudaEventDestroy(start);
cudaEventDestroy(stop);
// calculate the threadNum and blockNum for the next kernel
threadNum = (blockNum > maxThreadsPerBlock)? maxThreadsPerBlock: blockNum;
blockNum = (int) ceil((double) blockNum / threadNum);
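// Repeatedly reduce the partial sums in place (d_odata -> d_odata) until a single value remains.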
while(blockNum >= 1) {
// parameters for the current kernel
dim3 gridDim(blockNum, 1, 1);
dim3 blockDim(threadNum, 1, 1);
sMemSize = threadNum * sizeof(int);
// create event for recording GPU execution time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// execute the current kernel and set the GPU timer
cudaEventRecord(start, 0);
reduce<<<gridDim, blockDim, sMemSize>>>(d_odata, d_odata);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculate the execution time of the current kernel
cudaEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
cudaEventDestroy(start);
cudaEventDestroy(stop);
if (blockNum == 1) break;
// calculate the threadNum and blockNum for the next kernel
threadNum = (blockNum > maxThreadsPerBlock)? maxThreadsPerBlock: blockNum;
blockNum = (int) ceil((double) blockNum / threadNum);
}
// copy result back to CPU
cudaMemcpy(&sum, d_odata, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_idata);
cudaFree(d_odata);
free(h_idata);
float bandwidth = elemNum * sizeof(int) / (totalTime / 1000) / 1024 / 1024 / 1024;
printf("%d %fms %fGB/s\n", sum, totalTime, bandwidth);
return 0;
}
|
08f57fb82965e6b133d75f1b81c0f5f2ace6beef.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void add(int* a, int* b, int *c) {
*c = *a + *b;
}
#include <stdio.h>
int main() {
int a, b, c;
int *d_a, *d_b, *d_c;
hipMalloc((void**) &d_a, sizeof(int));
hipMalloc((void**) &d_b, sizeof(int));
hipMalloc((void**) &d_c, sizeof(int));
a = 2; b = 234;
hipMemcpy(d_a, &a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(&c, d_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d + %d = %d\n", a, b, c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
|
08f57fb82965e6b133d75f1b81c0f5f2ace6beef.cu
|
__global__ void add(int* a, int* b, int *c) {
*c = *a + *b;
}
#include <stdio.h>
int main() {
int a, b, c;
int *d_a, *d_b, *d_c;
cudaMalloc((void**) &d_a, sizeof(int));
cudaMalloc((void**) &d_b, sizeof(int));
cudaMalloc((void**) &d_c, sizeof(int));
a = 2; b = 234;
cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice);
add<<<1, 1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d + %d = %d\n", a, b, c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
daeb52d00fb77e2fe8f2961feaf5c96c7c1a50cb.hip
|
// !!! This is a file automatically generated by hipify!!!
/**************************************************************
* File: rgb2gray.cu
 * Description: CUDA implementation of an application that converts
 * a color picture to grayscale.
*
* Author: jfhansen
* Last Modification: 28/07/2020
*************************************************************/
#include <iostream>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define BLOCKDIM 16
/* Converts RGBA image to Grayscale image
* When converting image from RGB to grayscale photo,
* the pixels should use the following proportion of red, green and blue:
* I = 0.299f * R + 0.587f * G + 0.114f * B
* Arguments:
* rgbaImage: constant pointer to array of uchar4 holding RGBA values.
* grayImage: pointer to array of chars.
* numrows, numcols: Number of pixel rows and columns */
__global__ void cuda_rgba_to_grayscale(const uchar4 *const rgbaImage,
unsigned char *const grayImage, int numRows, int numCols)
{
// Get row and column for pixel
unsigned col, row;
col = threadIdx.x + blockDim.x * blockIdx.x;
row = threadIdx.y + blockDim.y * blockIdx.y;
// Fetch rgba value at pixel
uchar4 pixel = rgbaImage[row*numCols+col];
unsigned char brightness = (unsigned char)(.299f * pixel.x + .587f * pixel.y + .114f * pixel.z);
// Compute pixel brightness
grayImage[row*numCols+col] = brightness;
}
// Launches the RGBA-to-grayscale kernel over the device image d_rgbaImage and
// writes the result to the device buffer d_grayImage; no host transfers happen here.
void rgba_to_grayscale(const uchar4 *const d_rgbaImage, unsigned char *const d_grayImage,
size_t numRows, size_t numCols)
{
dim3 threadsPerBlock(BLOCKDIM,BLOCKDIM,1);
dim3 blocksPerGrid(
(numCols + BLOCKDIM - 1)/BLOCKDIM,
(numRows + BLOCKDIM - 1)/BLOCKDIM,
1);
hipLaunchKernelGGL(( cuda_rgba_to_grayscale), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_rgbaImage, d_grayImage, numRows, numCols);
hipError_t err;
while ( (err = hipGetLastError()) != hipSuccess )
std::cout << "CUDA Error: " << hipGetErrorString(err) << std::endl;
}
|
daeb52d00fb77e2fe8f2961feaf5c96c7c1a50cb.cu
|
/**************************************************************
* File: rgb2gray.cu
 * Description: CUDA implementation of an application that converts
 * a color picture to grayscale.
*
* Author: jfhansen
* Last Modification: 28/07/2020
*************************************************************/
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define BLOCKDIM 16
/* Converts RGBA image to Grayscale image
* When converting image from RGB to grayscale photo,
* the pixels should use the following proportion of red, green and blue:
* I = 0.299f * R + 0.587f * G + 0.114f * B
* Arguments:
* rgbaImage: constant pointer to array of uchar4 holding RGBA values.
* grayImage: pointer to array of chars.
* numrows, numcols: Number of pixel rows and columns */
__global__ void cuda_rgba_to_grayscale(const uchar4 *const rgbaImage,
unsigned char *const grayImage, int numRows, int numCols)
{
// Get row and column for pixel
unsigned col, row;
col = threadIdx.x + blockDim.x * blockIdx.x;
row = threadIdx.y + blockDim.y * blockIdx.y;
// Fetch rgba value at pixel
uchar4 pixel = rgbaImage[row*numCols+col];
unsigned char brightness = (unsigned char)(.299f * pixel.x + .587f * pixel.y + .114f * pixel.z);
// Compute pixel brightness
grayImage[row*numCols+col] = brightness;
}
// Launches the RGBA-to-grayscale kernel over the device image d_rgbaImage and
// writes the result to the device buffer d_grayImage; no host transfers happen here.
void rgba_to_grayscale(const uchar4 *const d_rgbaImage, unsigned char *const d_grayImage,
size_t numRows, size_t numCols)
{
dim3 threadsPerBlock(BLOCKDIM,BLOCKDIM,1);
dim3 blocksPerGrid(
(numCols + BLOCKDIM - 1)/BLOCKDIM,
(numRows + BLOCKDIM - 1)/BLOCKDIM,
1);
cuda_rgba_to_grayscale<<<blocksPerGrid, threadsPerBlock>>>(d_rgbaImage, d_grayImage, numRows, numCols);
cudaError_t err;
while ( (err = cudaGetLastError()) != cudaSuccess )
std::cout << "CUDA Error: " << cudaGetErrorString(err) << std::endl;
}
|
20eb1e1cdd2a2ac623b4cc6bc88dd960e030ec47.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void timeDomainConvolutionNaive(float* ibuf, float* rbuf, float* obuf, long long oframes, long long rframes, int ch, float gain) {
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float value = 0;
for (int k = 0; k < rframes; k++) {
value += ibuf[threadID - k] * rbuf[k];
}
obuf[threadID * 2 + ch] = value * gain;
}
|
20eb1e1cdd2a2ac623b4cc6bc88dd960e030ec47.cu
|
#include "includes.h"
__global__ void timeDomainConvolutionNaive(float* ibuf, float* rbuf, float* obuf, long long oframes, long long rframes, int ch, float gain) {
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float value = 0;
for (int k = 0; k < rframes; k++) {
value += ibuf[threadID - k] * rbuf[k];
}
obuf[threadID * 2 + ch] = value * gain;
}
|
e7df8f86c299bcb51dd7fab1b7425e5ce7da5174.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
GK104-optimized variant of the "Persistent speculative
while-while" kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
This variant fetches new work dynamically as soon as the
warp occupancy drops below a pre-determined threshold.
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
#define DYNAMIC_FETCH_THRESHOLD 20 // If fewer than this active, fetch new rays
extern "C" __device__ int g_warpCounter; // Work counter for persistent threads.
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
g_config.bvhLayout = BVHLayout_Compact2;
g_config.blockWidth = 32;
g_config.blockHeight = 4;
g_config.usePersistentThreads = 1;
}
//------------------------------------------------------------------------
TRACE_FUNC
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
// Live state during traversal, stored in registers.
float origx, origy, origz; // Ray origin.
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int leafAddr2; // Second postponed leaf, non-negative if none.
int nodeAddr = EntrypointSentinel; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
float tmin;
int rayidx;
float oodx;
float oody;
float oodz;
float dirx;
float diry;
float dirz;
float idirx;
float idiry;
float idirz;
// Initialize persistent threads.
__shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer.
// Persistent threads: fetch and process rays in a loop.
do
{
const int tidx = threadIdx.x;
volatile int& rayBase = nextRayArray[threadIdx.y];
// Fetch new rays from the global pool using lane 0.
const bool terminated = nodeAddr==EntrypointSentinel;
const unsigned int maskTerminated = __ballot(terminated);
const int numTerminated = __popc(maskTerminated);
const int idxTerminated = __popc(maskTerminated & ((1u<<tidx)-1));
if(terminated)
{
if (idxTerminated == 0)
rayBase = atomicAdd(&g_warpCounter, numTerminated);
rayidx = rayBase + idxTerminated;
if (rayidx >= numRays)
break;
// Fetch ray.
float4 o = FETCH_GLOBAL(rays, rayidx * 2 + 0, float4);
float4 d = FETCH_GLOBAL(rays, rayidx * 2 + 1, float4);
origx = o.x;
origy = o.y;
origz = o.z;
tmin = o.w;
dirx = d.x;
diry = d.y;
dirz = d.z;
hitT = d.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx;
oody = origy * idiry;
oodz = origz * idirz;
// Setup traversal.
stackPtr = (char*)&traversalStack[0];
leafAddr = 0; // No postponed leaf.
leafAddr2= 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
}
// Traversal loop.
while(nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
// while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
while (unsigned int(nodeAddr) < unsigned int(EntrypointSentinel)) // functionally equivalent, but faster
{
// Fetch AABBs of the two child nodes.
const float4 n0xy = tex1Dfetch(t_nodesA, nodeAddr + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
const float4 n1xy = tex1Dfetch(t_nodesA, nodeAddr + 1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
const float4 nz = tex1Dfetch(t_nodesA, nodeAddr + 2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 tmp = tex1Dfetch(t_nodesA, nodeAddr + 3); // child_index0, child_index1
int2 cnodes= *(int2*)&tmp;
// Intersect the ray against the child nodes.
const float c0lox = n0xy.x * idirx - oodx;
const float c0hix = n0xy.y * idirx - oodx;
const float c0loy = n0xy.z * idiry - oody;
const float c0hiy = n0xy.w * idiry - oody;
const float c0loz = nz.x * idirz - oodz;
const float c0hiz = nz.y * idirz - oodz;
const float c1loz = nz.z * idirz - oodz;
const float c1hiz = nz.w * idirz - oodz;
const float c0min = spanBeginKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
const float c0max = spanEndKepler (c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
const float c1lox = n1xy.x * idirx - oodx;
const float c1hix = n1xy.y * idirx - oodx;
const float c1loy = n1xy.z * idiry - oody;
const float c1hiy = n1xy.w * idiry - oody;
const float c1min = spanBeginKepler(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
const float c1max = spanEndKepler (c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool swp = (c1min < c0min);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (swp)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0) // Postpone max 1
// if (nodeAddr < 0 && leafAddr2 >= 0) // Postpone max 2
{
//leafAddr2= leafAddr; // postpone 2
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf? => process them.
// NOTE: inline PTX implementation of "if(!__any(leafAddr >= 0)) break;".
// tried everything with CUDA 4.2 but always got several redundant instructions.
unsigned int mask;
asm("{\n"
" .reg .pred p; \n"
"setp.ge.s32 p, %1, 0; \n"
"vote.ballot.b32 %0,p; \n"
"}"
: "=r"(mask)
: "r"(leafAddr));
if(!mask)
break;
//if(!__any(leafAddr >= 0))
// break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Tris in TEX (good to fetch as a single batch)
const float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
const float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
const float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
// End marker (negative zero) => all triangles processed.
if (__float_as_int(v00.x) == 0x80000000)
break;
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f)
{
// Compute and check barycentric v.
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
// if(leafAddr2<0) { leafAddr = leafAddr2; leafAddr2=0; } else // postpone2
{
leafAddr = nodeAddr;
if (nodeAddr < 0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
}
} // leaf
// DYNAMIC FETCH
if( __popc(__ballot(true)) < DYNAMIC_FETCH_THRESHOLD )
break;
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex == -1) { STORE_RESULT(rayidx, -1, hitT); }
else { STORE_RESULT(rayidx, FETCH_TEXTURE(triIndices, hitIndex, int), hitT); }
} while(true);
}
//------------------------------------------------------------------------
|
e7df8f86c299bcb51dd7fab1b7425e5ce7da5174.cu
|
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
GK104-optimized variant of the "Persistent speculative
while-while" kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
This variant fetches new work dynamically as soon as the
warp occupancy drops below a pre-determined threshold.
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
#define DYNAMIC_FETCH_THRESHOLD 20 // If fewer than this active, fetch new rays
extern "C" __device__ int g_warpCounter; // Work counter for persistent threads.
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
g_config.bvhLayout = BVHLayout_Compact2;
g_config.blockWidth = 32;
g_config.blockHeight = 4;
g_config.usePersistentThreads = 1;
}
//------------------------------------------------------------------------
TRACE_FUNC
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
// Live state during traversal, stored in registers.
float origx, origy, origz; // Ray origin.
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int leafAddr2; // Second postponed leaf, non-negative if none.
int nodeAddr = EntrypointSentinel; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
float tmin;
int rayidx;
float oodx;
float oody;
float oodz;
float dirx;
float diry;
float dirz;
float idirx;
float idiry;
float idirz;
// Initialize persistent threads.
__shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer.
// Persistent threads: fetch and process rays in a loop.
do
{
const int tidx = threadIdx.x;
volatile int& rayBase = nextRayArray[threadIdx.y];
// Fetch new rays from the global pool using lane 0.
const bool terminated = nodeAddr==EntrypointSentinel;
const unsigned int maskTerminated = __ballot(terminated);
const int numTerminated = __popc(maskTerminated);
const int idxTerminated = __popc(maskTerminated & ((1u<<tidx)-1));
if(terminated)
{
if (idxTerminated == 0)
rayBase = atomicAdd(&g_warpCounter, numTerminated);
rayidx = rayBase + idxTerminated;
if (rayidx >= numRays)
break;
// Fetch ray.
float4 o = FETCH_GLOBAL(rays, rayidx * 2 + 0, float4);
float4 d = FETCH_GLOBAL(rays, rayidx * 2 + 1, float4);
origx = o.x;
origy = o.y;
origz = o.z;
tmin = o.w;
dirx = d.x;
diry = d.y;
dirz = d.z;
hitT = d.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx;
oody = origy * idiry;
oodz = origz * idirz;
// Setup traversal.
stackPtr = (char*)&traversalStack[0];
leafAddr = 0; // No postponed leaf.
leafAddr2= 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
}
// Traversal loop.
while(nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
// while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
while (unsigned int(nodeAddr) < unsigned int(EntrypointSentinel)) // functionally equivalent, but faster
{
// Fetch AABBs of the two child nodes.
const float4 n0xy = tex1Dfetch(t_nodesA, nodeAddr + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
const float4 n1xy = tex1Dfetch(t_nodesA, nodeAddr + 1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
const float4 nz = tex1Dfetch(t_nodesA, nodeAddr + 2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 tmp = tex1Dfetch(t_nodesA, nodeAddr + 3); // child_index0, child_index1
int2 cnodes= *(int2*)&tmp;
// Intersect the ray against the child nodes.
const float c0lox = n0xy.x * idirx - oodx;
const float c0hix = n0xy.y * idirx - oodx;
const float c0loy = n0xy.z * idiry - oody;
const float c0hiy = n0xy.w * idiry - oody;
const float c0loz = nz.x * idirz - oodz;
const float c0hiz = nz.y * idirz - oodz;
const float c1loz = nz.z * idirz - oodz;
const float c1hiz = nz.w * idirz - oodz;
const float c0min = spanBeginKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
const float c0max = spanEndKepler (c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
const float c1lox = n1xy.x * idirx - oodx;
const float c1hix = n1xy.y * idirx - oodx;
const float c1loy = n1xy.z * idiry - oody;
const float c1hiy = n1xy.w * idiry - oody;
const float c1min = spanBeginKepler(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
const float c1max = spanEndKepler (c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool swp = (c1min < c0min);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (swp)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0) // Postpone max 1
// if (nodeAddr < 0 && leafAddr2 >= 0) // Postpone max 2
{
//leafAddr2= leafAddr; // postpone 2
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf? => process them.
// NOTE: inline PTX implementation of "if(!__any(leafAddr >= 0)) break;".
// tried everything with CUDA 4.2 but always got several redundant instructions.
unsigned int mask;
asm("{\n"
" .reg .pred p; \n"
"setp.ge.s32 p, %1, 0; \n"
"vote.ballot.b32 %0,p; \n"
"}"
: "=r"(mask)
: "r"(leafAddr));
if(!mask)
break;
//if(!__any(leafAddr >= 0))
// break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Tris in TEX (good to fetch as a single batch)
const float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
const float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
const float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
// End marker (negative zero) => all triangles processed.
if (__float_as_int(v00.x) == 0x80000000)
break;
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f)
{
// Compute and check barycentric v.
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
// if(leafAddr2<0) { leafAddr = leafAddr2; leafAddr2=0; } else // postpone2
{
leafAddr = nodeAddr;
if (nodeAddr < 0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
}
} // leaf
// DYNAMIC FETCH
if( __popc(__ballot(true)) < DYNAMIC_FETCH_THRESHOLD )
break;
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex == -1) { STORE_RESULT(rayidx, -1, hitT); }
else { STORE_RESULT(rayidx, FETCH_TEXTURE(triIndices, hitIndex, int), hitT); }
} while(true);
}
//------------------------------------------------------------------------
|
ce6cdee76861efb349d2b41280c1780c03f990f6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016, Julian Straub <[email protected]> Licensed
* under the MIT license. See the license file LICENSE.
*/
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
#include <tdp/data/image.h>
#include <tdp/data/managed_image.h>
#include <tdp/eigen/dense.h>
#include <tdp/reductions/reductions.cuh>
#include <tdp/nvidia/helper_cuda.h>
#include <tdp/cuda/cuda.cuh>
namespace tdp {
template <int K, int BLOCK_SIZE>
__global__ void MMFvMFCostFctAssignment(Image<Vector3fda> n,
Image<uint32_t> z, Image<Vector3fda> mu, Image<float> pi, float
*cost, float* W, int N_PER_T)
{
SharedMemory<float> smem;
float* data = smem.getPointer();
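// Dynamic shared memory layout: K*6 pi weights | BLOCK_SIZE rho partials | BLOCK_SIZE Wi partials | K*6 mu vectors.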
float* pik = data;
float* rho = &data[K*6];
float* Wi = &data[K*6+BLOCK_SIZE];
Vector3fda* mui = (Vector3fda*)(&data[K*6+2*BLOCK_SIZE]);//[K*6];
//__shared__ Vector3fda mui[K*6];
//__shared__ float pik[K*6];
//__shared__ float rho[BLOCK_SIZE];
//__shared__ float Wi[BLOCK_SIZE];
const int tid = threadIdx.x ;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < K*6) mui[tid] = mu[tid];
if(K*6 <= tid && tid < 2*K*6) pik[tid-K*6] = pi[tid-K*6];
rho[tid] = 0.0f;
Wi[tid] = 0;
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min((int)n.Area(),(idx+1)*N_PER_T); ++id)
{
Vector3fda ni = n[id];
float err_max = -1e7f;
uint32_t k_max = 6*K+1;
if(IsValidNormal(ni)) {
#pragma unroll
for (uint32_t k=0; k<6*K; ++k) {
float err = pik[k] + ni.dot(mui[k]);
// if (id%5 == 0)
// printf("%d: err %f pi %f dot %f\n",k,err,pik[k],ni.dot(mui[k]));
if(err_max < err) {
err_max = err;
k_max = k;
}
}
rho[tid] += err_max;
Wi[tid] += 1.;
}
z[id] = k_max;
}
//reduction.....
SumPyramidReduce<float,float,BLOCK_SIZE>(tid,rho,cost,Wi,W);
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
// if(tid < s) {
// rho[tid] += rho[tid + s];
// Wi[tid] += Wi[tid + s];
// }
// __syncthreads();
// }
//
// if(tid==0) {
// atomicAdd(&cost[0],rho[0]+rho[1]);
// }
// if(tid==1) {
// atomicAdd(W,Wi[0]+Wi[1]);
// }
}
template <int K, int BLOCK_SIZE>
__global__ void MMFvMFCostFctAssignment(Image<Vector3fda> n,
Image<float> weights,
Image<uint32_t> z, Image<Vector3fda> mu, Image<float> pi, float
*cost, float* W, int N_PER_T)
{
//__shared__ float xi[BLOCK_SIZE*3];
SharedMemory<float> smem;
float* data = smem.getPointer();
float* pik = data;
float* rho = &data[K*6];
float* Wi = &data[K*6+BLOCK_SIZE];
Vector3fda* mui = (Vector3fda*)(&data[K*6+2*BLOCK_SIZE]);//[K*6];
// __shared__ Vector3fda mui[K*6];
// __shared__ float pik[K*6];
// __shared__ float rho[BLOCK_SIZE];
// __shared__ float Wi[BLOCK_SIZE];
const int tid = threadIdx.x ;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < K*6) mui[tid] = mu[tid];
if(K*6 <= tid && tid < 2*K*6) pik[tid-K*6] = pi[tid-K*6]; // <= so that pik[0] is also cached, as in the unweighted kernel above
rho[tid] = 0.0f;
Wi[tid] = 0;
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min((int)n.Area(),(idx+1)*N_PER_T); ++id)
{
Vector3fda ni = n[id];
float weight = weights[id];
float err_max = -1e7f;
uint32_t k_max = 6*K+1;
if(IsValidNormal(ni)) {
#pragma unroll
for (uint32_t k=0; k<6*K; ++k) {
float err = pik[k] + ni.dot(mui[k]);
if(err_max < err) {
err_max = err;
k_max = k;
}
}
rho[tid] += weight*err_max;
Wi[tid] += weight;
}
z[id] = k_max;
}
SumPyramidReduce<float,float,BLOCK_SIZE>(tid,rho,cost,Wi,W);
}
void MMFvMFCostFctAssignmentGPU( Image<Vector3fda> cuN,
Image<uint32_t> cuZ, Image<Vector3fda>cuMu, Image<float> cuPi,
int K, float& cost, float& W)
{
if (K>=7) {
printf("currently only 7 MFvMFs are supported");
}
assert(K<8);
ManagedDeviceImage<float> cuCost(1,1);
ManagedDeviceImage<float> cuW(1,1);
hipMemset(cuCost.ptr_,0,cuCost.SizeBytes());
hipMemset(cuW.ptr_,0,cuW.SizeBytes());
const int N_PER_T = 16;
dim3 threads, blocks;
ComputeKernelParamsForArray(blocks,threads,cuN.Area(),256, N_PER_T);
const size_t memsize_bytes = (256*2 + K*6)*sizeof(float)+K*6*sizeof(Vector3fda);
if (K==1) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<1,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==2) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<2,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==3) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<3,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==4) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<4,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==5) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<5,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==6) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<6,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==7) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<7,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
}
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&cost, cuCost.ptr_, sizeof(float),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&W, cuW.ptr_, sizeof(float),
hipMemcpyDeviceToHost));
}
void MMFvMFCostFctAssignmentGPU(
Image<Vector3fda> cuN, Image<float> cuWeights,
Image<uint32_t> cuZ, Image<Vector3fda>cuMu, Image<float> cuPi,
int K, float& cost, float& W
) {
if (K>=7) {
printf("currently only 7 MFvMFs are supported");
}
assert(K<8);
ManagedDeviceImage<float> cuCost(1,1);
ManagedDeviceImage<float> cuW(1,1);
hipMemset(cuCost.ptr_,0,cuCost.SizeBytes());
hipMemset(cuW.ptr_,0,cuW.SizeBytes());
const int N_PER_T = 16;
dim3 threads, blocks;
ComputeKernelParamsForArray(blocks,threads,cuN.Area(),256,N_PER_T);
const size_t memsize_bytes = (256*2 + K*6)*sizeof(float)+K*6*sizeof(Vector3fda);
if (K==1) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<1,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==2) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<2,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==3) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<3,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==4) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<4,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==5) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<5,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==6) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<6,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==7) {
hipLaunchKernelGGL(( MMFvMFCostFctAssignment<7,256>), dim3(blocks),dim3(threads),memsize_bytes, 0,
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
}
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&cost, cuCost.ptr_, sizeof(float),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&W, cuW.ptr_, sizeof(float),
hipMemcpyDeviceToHost));
}
}
|
ce6cdee76861efb349d2b41280c1780c03f990f6.cu
|
/* Copyright (c) 2016, Julian Straub <[email protected]> Licensed
* under the MIT license. See the license file LICENSE.
*/
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
#include <tdp/data/image.h>
#include <tdp/data/managed_image.h>
#include <tdp/eigen/dense.h>
#include <tdp/reductions/reductions.cuh>
#include <tdp/nvidia/helper_cuda.h>
#include <tdp/cuda/cuda.cuh>
namespace tdp {
template <int K, int BLOCK_SIZE>
__global__ void MMFvMFCostFctAssignment(Image<Vector3fda> n,
Image<uint32_t> z, Image<Vector3fda> mu, Image<float> pi, float
*cost, float* W, int N_PER_T)
{
SharedMemory<float> smem;
float* data = smem.getPointer();
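// Dynamic shared memory layout: K*6 pi weights | BLOCK_SIZE rho partials | BLOCK_SIZE Wi partials | K*6 mu vectors.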
float* pik = data;
float* rho = &data[K*6];
float* Wi = &data[K*6+BLOCK_SIZE];
Vector3fda* mui = (Vector3fda*)(&data[K*6+2*BLOCK_SIZE]);//[K*6];
//__shared__ Vector3fda mui[K*6];
//__shared__ float pik[K*6];
//__shared__ float rho[BLOCK_SIZE];
//__shared__ float Wi[BLOCK_SIZE];
const int tid = threadIdx.x ;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < K*6) mui[tid] = mu[tid];
if(K*6 <= tid && tid < 2*K*6) pik[tid-K*6] = pi[tid-K*6];
rho[tid] = 0.0f;
Wi[tid] = 0;
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min((int)n.Area(),(idx+1)*N_PER_T); ++id)
{
Vector3fda ni = n[id];
float err_max = -1e7f;
uint32_t k_max = 6*K+1;
if(IsValidNormal(ni)) {
#pragma unroll
for (uint32_t k=0; k<6*K; ++k) {
float err = pik[k] + ni.dot(mui[k]);
// if (id%5 == 0)
// printf("%d: err %f pi %f dot %f\n",k,err,pik[k],ni.dot(mui[k]));
if(err_max < err) {
err_max = err;
k_max = k;
}
}
rho[tid] += err_max;
Wi[tid] += 1.;
}
z[id] = k_max;
}
//reduction.....
SumPyramidReduce<float,float,BLOCK_SIZE>(tid,rho,cost,Wi,W);
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) {
// if(tid < s) {
// rho[tid] += rho[tid + s];
// Wi[tid] += Wi[tid + s];
// }
// __syncthreads();
// }
//
// if(tid==0) {
// atomicAdd(&cost[0],rho[0]+rho[1]);
// }
// if(tid==1) {
// atomicAdd(W,Wi[0]+Wi[1]);
// }
}
template <int K, int BLOCK_SIZE>
__global__ void MMFvMFCostFctAssignment(Image<Vector3fda> n,
Image<float> weights,
Image<uint32_t> z, Image<Vector3fda> mu, Image<float> pi, float
*cost, float* W, int N_PER_T)
{
//__shared__ float xi[BLOCK_SIZE*3];
SharedMemory<float> smem;
float* data = smem.getPointer();
float* pik = data;
float* rho = &data[K*6];
float* Wi = &data[K*6+BLOCK_SIZE];
Vector3fda* mui = (Vector3fda*)(&data[K*6+2*BLOCK_SIZE]);//[K*6];
// __shared__ Vector3fda mui[K*6];
// __shared__ float pik[K*6];
// __shared__ float rho[BLOCK_SIZE];
// __shared__ float Wi[BLOCK_SIZE];
const int tid = threadIdx.x ;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
// caching
if(tid < K*6) mui[tid] = mu[tid];
if(K*6 <= tid && tid < 2*K*6) pik[tid-K*6] = pi[tid-K*6]; // <= so that pik[0] is also cached, as in the unweighted kernel above
rho[tid] = 0.0f;
Wi[tid] = 0;
__syncthreads(); // make sure that ys have been cached
for(int id=idx*N_PER_T; id<min((int)n.Area(),(idx+1)*N_PER_T); ++id)
{
Vector3fda ni = n[id];
float weight = weights[id];
float err_max = -1e7f;
uint32_t k_max = 6*K+1;
if(IsValidNormal(ni)) {
#pragma unroll
for (uint32_t k=0; k<6*K; ++k) {
float err = pik[k] + ni.dot(mui[k]);
if(err_max < err) {
err_max = err;
k_max = k;
}
}
rho[tid] += weight*err_max;
Wi[tid] += weight;
}
z[id] = k_max;
}
SumPyramidReduce<float,float,BLOCK_SIZE>(tid,rho,cost,Wi,W);
}
void MMFvMFCostFctAssignmentGPU( Image<Vector3fda> cuN,
Image<uint32_t> cuZ, Image<Vector3fda>cuMu, Image<float> cuPi,
int K, float& cost, float& W)
{
if (K>=7) {
printf("currently only 7 MFvMFs are supported");
}
assert(K<8);
ManagedDeviceImage<float> cuCost(1,1);
ManagedDeviceImage<float> cuW(1,1);
cudaMemset(cuCost.ptr_,0,cuCost.SizeBytes());
cudaMemset(cuW.ptr_,0,cuW.SizeBytes());
const int N_PER_T = 16;
dim3 threads, blocks;
ComputeKernelParamsForArray(blocks,threads,cuN.Area(),256, N_PER_T);
const size_t memsize_bytes = (256*2 + K*6)*sizeof(float)+K*6*sizeof(Vector3fda);
if (K==1) {
MMFvMFCostFctAssignment<1,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==2) {
MMFvMFCostFctAssignment<2,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==3) {
MMFvMFCostFctAssignment<3,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==4) {
MMFvMFCostFctAssignment<4,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==5) {
MMFvMFCostFctAssignment<5,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==6) {
MMFvMFCostFctAssignment<6,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==7) {
MMFvMFCostFctAssignment<7,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
}
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&cost, cuCost.ptr_, sizeof(float),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&W, cuW.ptr_, sizeof(float),
cudaMemcpyDeviceToHost));
}
void MMFvMFCostFctAssignmentGPU(
Image<Vector3fda> cuN, Image<float> cuWeights,
Image<uint32_t> cuZ, Image<Vector3fda>cuMu, Image<float> cuPi,
int K, float& cost, float& W
) {
  if (K > 7) {
    printf("currently only up to 7 MFvMFs are supported\n");
  }
assert(K<8);
ManagedDeviceImage<float> cuCost(1,1);
ManagedDeviceImage<float> cuW(1,1);
cudaMemset(cuCost.ptr_,0,cuCost.SizeBytes());
cudaMemset(cuW.ptr_,0,cuW.SizeBytes());
const int N_PER_T = 16;
dim3 threads, blocks;
ComputeKernelParamsForArray(blocks,threads,cuN.Area(),256,N_PER_T);
const size_t memsize_bytes = (256*2 + K*6)*sizeof(float)+K*6*sizeof(Vector3fda);
if (K==1) {
MMFvMFCostFctAssignment<1,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==2) {
MMFvMFCostFctAssignment<2,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==3) {
MMFvMFCostFctAssignment<3,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==4) {
MMFvMFCostFctAssignment<4,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==5) {
MMFvMFCostFctAssignment<5,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==6) {
MMFvMFCostFctAssignment<6,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
} else if (K==7) {
MMFvMFCostFctAssignment<7,256><<<blocks,threads,memsize_bytes>>>(
cuN,cuWeights,cuZ,cuMu,cuPi,cuCost.ptr_,cuW.ptr_,N_PER_T);
}
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&cost, cuCost.ptr_, sizeof(float),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&W, cuW.ptr_, sizeof(float),
cudaMemcpyDeviceToHost));
}
}
|
39bf4bc87bfd2982288e2ec2b05071d32ab823f6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
replace table
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
texture <int2, hipTextureType1D, hipReadModeElementType> lt_tex;
texture <int2, hipTextureType1D, hipReadModeElementType> rt_tex;
extern "C" {
__global__
void partitioning_lt(
//TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
  // Partitioning phase: scatter each tuple into its hash bucket using the per-thread write offsets in L
int hash = 0;
int temp = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
      // executed up to this point
fetched_val = tex1Dfetch(lt_tex, DEF + threadIdx.x + i*Dim);
if(blockIdx.x==0&&threadIdx.x==0){
printf("ok\n");
}
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
pt[temp].key = fetched_val.x;
pt[temp].val = fetched_val.y;
if(blockIdx.x==0&&threadIdx.x==0){
printf("%d\t%d\n",fetched_val.x,fetched_val.y);
}
/*
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
*/
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
__global__
void partitioning_rt(
//TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
  // Partitioning phase: scatter each tuple into its hash bucket using the per-thread write offsets in L
int hash = 0;
int temp = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
fetched_val = tex1Dfetch(rt_tex, DEF + threadIdx.x + i*Dim);
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
pt[temp].key = fetched_val.x;
pt[temp].val = fetched_val.y;
/*
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
*/
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
}
|
39bf4bc87bfd2982288e2ec2b05071d32ab823f6.cu
|
/*
replace table
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
texture <int2, cudaTextureType1D, cudaReadModeElementType> lt_tex;
texture <int2, cudaTextureType1D, cudaReadModeElementType> rt_tex;
extern "C" {
__global__
void partitioning_lt(
//TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
  // Partitioning phase: scatter each tuple into its hash bucket using the per-thread write offsets in L
int hash = 0;
int temp = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
      // executed up to this point
fetched_val = tex1Dfetch(lt_tex, DEF + threadIdx.x + i*Dim);
if(blockIdx.x==0&&threadIdx.x==0){
printf("ok\n");
}
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
pt[temp].key = fetched_val.x;
pt[temp].val = fetched_val.y;
if(blockIdx.x==0&&threadIdx.x==0){
printf("%d\t%d\n",fetched_val.x,fetched_val.y);
}
/*
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
*/
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
__global__
void partitioning_rt(
//TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
  // Partitioning phase: scatter each tuple into its hash bucket using the per-thread write offsets in L
int hash = 0;
int temp = 0;
int2 fetched_val;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
fetched_val = tex1Dfetch(rt_tex, DEF + threadIdx.x + i*Dim);
hash = fetched_val.y % p_n;
//hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
pt[temp].key = fetched_val.x;
pt[temp].val = fetched_val.y;
/*
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
*/
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
}
|
389c7b128cdb49864c816875e05f94817ac29c2f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Main.cuh"
extern int NRAD, NSEC, size_grid, AdvecteLabel, OpenInner, Adiabatic, FastTransport;
extern float OmegaFrame;
extern float *Label_d, *QStar_d, *Qbase_d, *Qbase2_d, *DensStar_d, *array_d;
extern float *invdiffRmed_d, *Rinf_d, *Rmed_d, *invRmed_d, *Rsup_d, *invSurf_d, *Surf_d;
extern float *Dens_d, *Vrad_d, *Vtheta_d, *Energy_d, *Vazimutal_d, *DensInt_d;
extern float *DensStar, *QStar, *Qbase, *DensInt;
extern dim3 dimGrid2, dimBlock2, dimBlock, dimGrid4;
float *RadMomP, *RadMomM, *ThetaMomP, *ThetaMomM, *Work, *QRStar, *ExtLabel, *dq, *TempShift;
float *VthetaRes, *RadMomP_d, *RadMomM_d, *ThetaMomP_d, *ThetaMomM_d, *VthetaRes_d, *Work_d, *TempShift_d;
float *QRStar_d, *ExtLabel_d, *dq_d, *LostByDisk_d, *VMed_d;
float LostMass = 0.0;
static int UniformTransport;
int *NoSplitAdvection_d;
int *Nshift_d;
__host__ void Transport (float *Dens, float *Vrad, float *Vtheta, float *Energy, float *Label, float dt)
{
  ComputeLRMomenta(); // fine up to this point
//if (AdvecteLabel == YES) ComputeExtQty();
  /* No-Alternate Directional Splitting */
OneWindRad (Dens, Vrad, Energy, dt);
OneWindTheta (Dens, Vtheta, Energy, dt);
ComputeVelocities (Dens, Vrad, Vtheta);
//if (AdvecteLabel) ComputeSpeQty (Dens, Label, ExtLabel);
}
__host__ void ComputeLRMomenta()
{
hipLaunchKernelGGL(( LRMomentaKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, RadMomP_d, RadMomM_d, ThetaMomP_d, ThetaMomM_d, Dens_d, Vrad_d, Vtheta_d,
NRAD, NSEC, Rmed_d, OmegaFrame);
gpuErrchk(hipDeviceSynchronize());
}
__host__ void ComputeExtQty()
{
hipLaunchKernelGGL(( ExtQtyKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, ExtLabel_d, Dens_d, Label_d, NSEC, NRAD);
gpuErrchk(hipDeviceSynchronize());
}
__host__ void OneWindRad (float *Dens, float *Vrad, float *Energy, float dt)
{
ComputeStarRad(Vrad, dt, 0);
ActualiseGasDens (DensInt, Dens);
VanLeerRadial (Vrad, dt, 0, 0);
VanLeerRadial (Vrad, dt, 0, 1);
VanLeerRadial (Vrad, dt, 0, 2);
VanLeerRadial (Vrad, dt, 0, 3);
if (Adiabatic)
VanLeerRadial (Vrad, dt, 0, 4);
if (AdvecteLabel == YES)
VanLeerRadial (Vrad, dt, 0, 5);
LostMass += VanLeerRadial (Vrad, dt, 1, 6);
}
__host__ void ComputeStarRad(float *Vrad, float dt, int option)
{
if(option == 0){
hipLaunchKernelGGL(( StarRadKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Dens_d, Vrad_d, DensStar_d, dt, NRAD, NSEC, invdiffRmed_d, Rmed_d, dq_d);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( StarRadKernel2), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Dens_d, Vrad_d, DensStar_d, dt, NRAD, NSEC, invdiffRmed_d, Rmed_d, dq_d);
gpuErrchk(hipDeviceSynchronize());
}
else{
hipLaunchKernelGGL(( StarRadKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Work_d, Vrad_d, QRStar_d, dt, NRAD, NSEC, invdiffRmed_d, Rmed_d, dq_d);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( StarRadKernel2), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Work_d, Vrad_d, QRStar_d, dt, NRAD, NSEC, invdiffRmed_d, Rmed_d, dq_d);
gpuErrchk(hipDeviceSynchronize());
}
}
__host__ void ActualiseGasDens(float *DensInt, float *Dens)
{
gpuErrchk(hipMemcpy(DensInt_d, Dens_d, size_grid*sizeof(float), hipMemcpyDeviceToDevice));
}
__host__ float VanLeerRadial (float *Vrad, float dt, int ReturnLost, int option)
{
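  // option selects the quantity to advect radially: 0=RadMomP, 1=RadMomM, 2=ThetaMomP, 3=ThetaMomM, 4=Energy, 6=Dens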
float Lost = 0.0;
if(option == 0) DivisePolarGrid (RadMomP_d, DensInt_d, Work_d);
if(option == 1) DivisePolarGrid (RadMomM_d, DensInt_d, Work_d);
if(option == 2) DivisePolarGrid (ThetaMomP_d, DensInt_d, Work_d);
if(option == 3) DivisePolarGrid (ThetaMomM_d, DensInt_d, Work_d);
if(option == 4) DivisePolarGrid (Energy_d, DensInt_d, Work_d);
if(option == 6) DivisePolarGrid (Dens_d, DensInt_d, Work_d);
ComputeStarRad (Vrad, dt, 1);
if (option == 0) {
hipLaunchKernelGGL(( VanLeerRadialKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, RadMomP_d, invSurf_d);
gpuErrchk(hipDeviceSynchronize());
}
if (option == 1) {
hipLaunchKernelGGL(( VanLeerRadialKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, RadMomM_d, invSurf_d);
gpuErrchk(hipDeviceSynchronize());
}
if (option == 2) {
hipLaunchKernelGGL(( VanLeerRadialKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, ThetaMomP_d, invSurf_d);
gpuErrchk(hipDeviceSynchronize());
}
if (option == 3) {
hipLaunchKernelGGL(( VanLeerRadialKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, ThetaMomM_d, invSurf_d);
gpuErrchk(hipDeviceSynchronize());
}
if (option == 4) {
hipLaunchKernelGGL(( VanLeerRadialKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, Energy_d, invSurf_d);
gpuErrchk(hipDeviceSynchronize());
}
if (option == 6) {
hipLaunchKernelGGL(( VanLeerRadialKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, Dens_d, invSurf_d);
gpuErrchk(hipDeviceSynchronize());
}
if (ReturnLost) Lost = DeviceReduce(LostByDisk_d, NSEC);
return Lost;
}
__host__ void ComputeSpeQty (float *Dens, float *label, float *ExtLabel)
{
hipLaunchKernelGGL(( ComputeSpeQtyKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Label_d, Dens_d, ExtLabel_d, NRAD, NSEC);
gpuErrchk(hipDeviceSynchronize());
}
__host__ void ComputeVelocities(float *Dens, float *Vrad, float *Vtheta)
{
hipLaunchKernelGGL(( ComputeVelocitiesKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Vrad_d, Vtheta_d, Dens_d, Rmed_d, ThetaMomP_d,
ThetaMomM_d, RadMomP_d, RadMomM_d, NRAD, NSEC, OmegaFrame);
gpuErrchk(hipDeviceSynchronize());
}
__host__ void OneWindTheta (float *Dens, float *Vtheta, float *Energy, float dt)
{
ComputeAverageThetaVelocities (Vtheta, dt);
ComputeResiduals (Vtheta, dt);
ComputeConstantResidual (Vtheta, dt); /* Constant residual is in Vtheta from now on */
UniformTransport = NO;
QuantitiesAdvection (Dens, VthetaRes_d, Energy, dt, 0);
UniformTransport = YES;
QuantitiesAdvection (Dens, Vtheta_d, Energy, dt, 1);
AdvectSHIFT (RadMomP_d);
AdvectSHIFT (RadMomM_d);
AdvectSHIFT (ThetaMomP_d);
AdvectSHIFT (ThetaMomM_d);
if (Adiabatic) AdvectSHIFT (Energy_d);
//if (AdvecteLabel) AdvectSHIFT (ExtLabel_d);
AdvectSHIFT (Dens_d);
}
__host__ void ComputeAverageThetaVelocities (float *Vtheta, float dt)
{
hipLaunchKernelGGL(( ComputeAverageThetaVelocitiesKernel), dim3(dimGrid4), dim3(dimBlock), 0, 0, Vtheta_d, VMed_d, NSEC, NRAD);
gpuErrchk(hipDeviceSynchronize());
}
__host__ void ComputeResiduals (float *Vtheta, float dt)
{
hipLaunchKernelGGL(( ComputeResidualsKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, VthetaRes_d, VMed_d, NSEC, NRAD, Vtheta_d);
gpuErrchk(hipDeviceSynchronize());
}
__host__ void AdvectSHIFT (float *array_d)
{
hipLaunchKernelGGL(( AdvectSHIFTKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, array_d, TempShift_d, NSEC, NRAD, Nshift_d);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(array_d, TempShift_d, size_grid*sizeof(float), hipMemcpyDeviceToDevice));
}
__host__ void ComputeConstantResidual (float *Vtheta, float dt)
{
hipLaunchKernelGGL(( ComputeConstantResidualKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, VMed_d, invRmed_d, Nshift_d, NoSplitAdvection_d,
NSEC, NRAD, dt, Vtheta_d, VthetaRes_d, Rmed_d, FastTransport);
gpuErrchk(hipDeviceSynchronize());
}
__host__ void QuantitiesAdvection (float *Dens, float *Vazimutal_d, float *Energy, float dt, int option)
{
ComputeStarTheta (Dens_d, Vazimutal_d, DensStar_d, dt);
ActualiseGasDens (DensInt, Dens);
VanLeerTheta (Vazimutal_d, RadMomP_d, dt);
VanLeerTheta (Vazimutal_d, RadMomM_d, dt);
VanLeerTheta (Vazimutal_d, ThetaMomP_d, dt);
VanLeerTheta (Vazimutal_d, ThetaMomM_d, dt);
if (Adiabatic)
VanLeerTheta (Vazimutal_d, Energy_d, dt);
//if (AdvecteLabel)
//VanLeerTheta (Vazimutal_d, ExtLabel_d, dt);
VanLeerTheta (Vazimutal_d, Dens_d, dt); /* MUST be the last line */
}
__host__ void VanLeerTheta (float *Vazimutal_d, float *Qbase_d, float dt)
{
DivisePolarGrid (Qbase_d, DensInt_d, Work_d);
ComputeStarTheta (Work_d, Vazimutal_d, QRStar_d, dt);
hipLaunchKernelGGL(( VanLeerThetaKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Rsup_d, Rinf_d, Surf_d, dt, NRAD, NSEC, 0,
NoSplitAdvection_d, QRStar_d, DensStar_d, Vazimutal_d, Qbase_d);
gpuErrchk(hipDeviceSynchronize());
}
__host__ void ComputeStarTheta (float *Qbase_d, float *Vazimutal_d, float *QStar_d, float dt)
{
//gpuErrchk(hipMemset(dq_d, 0, size_grid*sizeof(float)));
hipLaunchKernelGGL(( StarThetaKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Qbase_d, Rmed_d, NRAD, NSEC, dq_d, dt);
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( StarThetaKernel2), dim3(dimGrid2), dim3(dimBlock2), 0, 0, Qbase_d, Rmed_d, Vazimutal_d, QStar_d, NRAD, NSEC, dq_d, dt);
gpuErrchk(hipDeviceSynchronize());
}
__host__ void InitTransport ()
{
RadMomP = (float *)malloc(size_grid*sizeof(float));
RadMomM = (float *)malloc(size_grid*sizeof(float));
ThetaMomP = (float *)malloc(size_grid*sizeof(float));
ThetaMomM = (float *)malloc(size_grid*sizeof(float));
Work = (float *)malloc(size_grid*sizeof(float));
QRStar = (float *)malloc(size_grid*sizeof(float));
ExtLabel = (float *)malloc(size_grid*sizeof(float));
VthetaRes = (float *)malloc(size_grid*sizeof(float));
TempShift = (float *)malloc(size_grid*sizeof(float));
dq = (float *)malloc(size_grid*sizeof(float));
InitTransportDevice();
}
__host__ void InitTransportDevice()
{
gpuErrchk(hipMalloc((void**)&RadMomP_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&RadMomM_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&ThetaMomP_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&ThetaMomM_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&Work_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&QRStar_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&ExtLabel_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&dq_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&LostByDisk_d, NSEC*sizeof(float)));
gpuErrchk(hipMalloc((void**)&VthetaRes_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&TempShift_d, size_grid*sizeof(float)));
gpuErrchk(hipMalloc((void**)&VMed_d, NRAD*sizeof(float)));
gpuErrchk(hipMalloc((void**)&Nshift_d, NRAD*sizeof(int)));
gpuErrchk(hipMalloc((void**)&NoSplitAdvection_d, NRAD*sizeof(int)));
gpuErrchk(hipMemset(RadMomP_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(RadMomM_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(ThetaMomP_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(ThetaMomM_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(Work_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(QRStar_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(ExtLabel_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(dq_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(LostByDisk_d, 0, NSEC*sizeof(float)));
gpuErrchk(hipMemset(VthetaRes_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(TempShift_d, 0, size_grid*sizeof(float)));
gpuErrchk(hipMemset(VMed_d, 0, NRAD*sizeof(float)));
gpuErrchk(hipMemset(Nshift_d, 0, NRAD*sizeof(int)));
gpuErrchk(hipMemset(NoSplitAdvection_d, 0, NRAD*sizeof(int)));
}
|
389c7b128cdb49864c816875e05f94817ac29c2f.cu
|
#include "Main.cuh"
extern int NRAD, NSEC, size_grid, AdvecteLabel, OpenInner, Adiabatic, FastTransport;
extern float OmegaFrame;
extern float *Label_d, *QStar_d, *Qbase_d, *Qbase2_d, *DensStar_d, *array_d;
extern float *invdiffRmed_d, *Rinf_d, *Rmed_d, *invRmed_d, *Rsup_d, *invSurf_d, *Surf_d;
extern float *Dens_d, *Vrad_d, *Vtheta_d, *Energy_d, *Vazimutal_d, *DensInt_d;
extern float *DensStar, *QStar, *Qbase, *DensInt;
extern dim3 dimGrid2, dimBlock2, dimBlock, dimGrid4;
float *RadMomP, *RadMomM, *ThetaMomP, *ThetaMomM, *Work, *QRStar, *ExtLabel, *dq, *TempShift;
float *VthetaRes, *RadMomP_d, *RadMomM_d, *ThetaMomP_d, *ThetaMomM_d, *VthetaRes_d, *Work_d, *TempShift_d;
float *QRStar_d, *ExtLabel_d, *dq_d, *LostByDisk_d, *VMed_d;
float LostMass = 0.0;
static int UniformTransport;
int *NoSplitAdvection_d;
int *Nshift_d;
__host__ void Transport (float *Dens, float *Vrad, float *Vtheta, float *Energy, float *Label, float dt)
{
  ComputeLRMomenta(); // fine up to this point
//if (AdvecteLabel == YES) ComputeExtQty();
  /* No-Alternate Directional Splitting */
OneWindRad (Dens, Vrad, Energy, dt);
OneWindTheta (Dens, Vtheta, Energy, dt);
ComputeVelocities (Dens, Vrad, Vtheta);
//if (AdvecteLabel) ComputeSpeQty (Dens, Label, ExtLabel);
}
__host__ void ComputeLRMomenta()
{
LRMomentaKernel<<<dimGrid2, dimBlock2>>>(RadMomP_d, RadMomM_d, ThetaMomP_d, ThetaMomM_d, Dens_d, Vrad_d, Vtheta_d,
NRAD, NSEC, Rmed_d, OmegaFrame);
gpuErrchk(cudaDeviceSynchronize());
}
__host__ void ComputeExtQty()
{
ExtQtyKernel<<<dimGrid2, dimBlock2>>>(ExtLabel_d, Dens_d, Label_d, NSEC, NRAD);
gpuErrchk(cudaDeviceSynchronize());
}
__host__ void OneWindRad (float *Dens, float *Vrad, float *Energy, float dt)
{
ComputeStarRad(Vrad, dt, 0);
ActualiseGasDens (DensInt, Dens);
VanLeerRadial (Vrad, dt, 0, 0);
VanLeerRadial (Vrad, dt, 0, 1);
VanLeerRadial (Vrad, dt, 0, 2);
VanLeerRadial (Vrad, dt, 0, 3);
if (Adiabatic)
VanLeerRadial (Vrad, dt, 0, 4);
if (AdvecteLabel == YES)
VanLeerRadial (Vrad, dt, 0, 5);
LostMass += VanLeerRadial (Vrad, dt, 1, 6);
}
__host__ void ComputeStarRad(float *Vrad, float dt, int option)
{
if(option == 0){
StarRadKernel<<<dimGrid2, dimBlock2>>> (Dens_d, Vrad_d, DensStar_d, dt, NRAD, NSEC, invdiffRmed_d, Rmed_d, dq_d);
gpuErrchk(cudaDeviceSynchronize());
StarRadKernel2<<<dimGrid2, dimBlock2>>> (Dens_d, Vrad_d, DensStar_d, dt, NRAD, NSEC, invdiffRmed_d, Rmed_d, dq_d);
gpuErrchk(cudaDeviceSynchronize());
}
else{
StarRadKernel<<<dimGrid2, dimBlock2>>> (Work_d, Vrad_d, QRStar_d, dt, NRAD, NSEC, invdiffRmed_d, Rmed_d, dq_d);
gpuErrchk(cudaDeviceSynchronize());
StarRadKernel2<<<dimGrid2, dimBlock2>>> (Work_d, Vrad_d, QRStar_d, dt, NRAD, NSEC, invdiffRmed_d, Rmed_d, dq_d);
gpuErrchk(cudaDeviceSynchronize());
}
}
__host__ void ActualiseGasDens(float *DensInt, float *Dens)
{
gpuErrchk(cudaMemcpy(DensInt_d, Dens_d, size_grid*sizeof(float), cudaMemcpyDeviceToDevice));
}
__host__ float VanLeerRadial (float *Vrad, float dt, int ReturnLost, int option)
{
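  // option selects the quantity to advect radially: 0=RadMomP, 1=RadMomM, 2=ThetaMomP, 3=ThetaMomM, 4=Energy, 6=Dens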
float Lost = 0.0;
if(option == 0) DivisePolarGrid (RadMomP_d, DensInt_d, Work_d);
if(option == 1) DivisePolarGrid (RadMomM_d, DensInt_d, Work_d);
if(option == 2) DivisePolarGrid (ThetaMomP_d, DensInt_d, Work_d);
if(option == 3) DivisePolarGrid (ThetaMomM_d, DensInt_d, Work_d);
if(option == 4) DivisePolarGrid (Energy_d, DensInt_d, Work_d);
if(option == 6) DivisePolarGrid (Dens_d, DensInt_d, Work_d);
ComputeStarRad (Vrad, dt, 1);
if (option == 0) {
VanLeerRadialKernel<<<dimGrid2, dimBlock2>>>(Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, RadMomP_d, invSurf_d);
gpuErrchk(cudaDeviceSynchronize());
}
if (option == 1) {
VanLeerRadialKernel<<<dimGrid2, dimBlock2>>>(Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, RadMomM_d, invSurf_d);
gpuErrchk(cudaDeviceSynchronize());
}
if (option == 2) {
VanLeerRadialKernel<<<dimGrid2, dimBlock2>>>(Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, ThetaMomP_d, invSurf_d);
gpuErrchk(cudaDeviceSynchronize());
}
if (option == 3) {
VanLeerRadialKernel<<<dimGrid2, dimBlock2>>>(Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, ThetaMomM_d, invSurf_d);
gpuErrchk(cudaDeviceSynchronize());
}
if (option == 4) {
VanLeerRadialKernel<<<dimGrid2, dimBlock2>>>(Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, Energy_d, invSurf_d);
gpuErrchk(cudaDeviceSynchronize());
}
if (option == 6) {
VanLeerRadialKernel<<<dimGrid2, dimBlock2>>>(Rinf_d, Rsup_d, QRStar_d, DensStar_d, Vrad_d,
LostByDisk_d, NSEC, NRAD, dt, OpenInner, Dens_d, invSurf_d);
gpuErrchk(cudaDeviceSynchronize());
}
if (ReturnLost) Lost = DeviceReduce(LostByDisk_d, NSEC);
return Lost;
}
__host__ void ComputeSpeQty (float *Dens, float *label, float *ExtLabel)
{
ComputeSpeQtyKernel<<<dimGrid2, dimBlock2>>>(Label_d, Dens_d, ExtLabel_d, NRAD, NSEC);
gpuErrchk(cudaDeviceSynchronize());
}
__host__ void ComputeVelocities(float *Dens, float *Vrad, float *Vtheta)
{
ComputeVelocitiesKernel<<<dimGrid2, dimBlock2>>>(Vrad_d, Vtheta_d, Dens_d, Rmed_d, ThetaMomP_d,
ThetaMomM_d, RadMomP_d, RadMomM_d, NRAD, NSEC, OmegaFrame);
gpuErrchk(cudaDeviceSynchronize());
}
__host__ void OneWindTheta (float *Dens, float *Vtheta, float *Energy, float dt)
{
ComputeAverageThetaVelocities (Vtheta, dt);
ComputeResiduals (Vtheta, dt);
ComputeConstantResidual (Vtheta, dt); /* Constant residual is in Vtheta from now on */
UniformTransport = NO;
QuantitiesAdvection (Dens, VthetaRes_d, Energy, dt, 0);
UniformTransport = YES;
QuantitiesAdvection (Dens, Vtheta_d, Energy, dt, 1);
AdvectSHIFT (RadMomP_d);
AdvectSHIFT (RadMomM_d);
AdvectSHIFT (ThetaMomP_d);
AdvectSHIFT (ThetaMomM_d);
if (Adiabatic) AdvectSHIFT (Energy_d);
//if (AdvecteLabel) AdvectSHIFT (ExtLabel_d);
AdvectSHIFT (Dens_d);
}
__host__ void ComputeAverageThetaVelocities (float *Vtheta, float dt)
{
ComputeAverageThetaVelocitiesKernel<<<dimGrid4, dimBlock>>>(Vtheta_d, VMed_d, NSEC, NRAD);
gpuErrchk(cudaDeviceSynchronize());
}
__host__ void ComputeResiduals (float *Vtheta, float dt)
{
ComputeResidualsKernel<<<dimGrid2, dimBlock2>>>(VthetaRes_d, VMed_d, NSEC, NRAD, Vtheta_d);
gpuErrchk(cudaDeviceSynchronize());
}
__host__ void AdvectSHIFT (float *array_d)
{
AdvectSHIFTKernel<<<dimGrid2, dimBlock2>>> (array_d, TempShift_d, NSEC, NRAD, Nshift_d);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(array_d, TempShift_d, size_grid*sizeof(float), cudaMemcpyDeviceToDevice));
}
__host__ void ComputeConstantResidual (float *Vtheta, float dt)
{
ComputeConstantResidualKernel<<<dimGrid2, dimBlock2>>>(VMed_d, invRmed_d, Nshift_d, NoSplitAdvection_d,
NSEC, NRAD, dt, Vtheta_d, VthetaRes_d, Rmed_d, FastTransport);
gpuErrchk(cudaDeviceSynchronize());
}
__host__ void QuantitiesAdvection (float *Dens, float *Vazimutal_d, float *Energy, float dt, int option)
{
ComputeStarTheta (Dens_d, Vazimutal_d, DensStar_d, dt);
ActualiseGasDens (DensInt, Dens);
VanLeerTheta (Vazimutal_d, RadMomP_d, dt);
VanLeerTheta (Vazimutal_d, RadMomM_d, dt);
VanLeerTheta (Vazimutal_d, ThetaMomP_d, dt);
VanLeerTheta (Vazimutal_d, ThetaMomM_d, dt);
if (Adiabatic)
VanLeerTheta (Vazimutal_d, Energy_d, dt);
//if (AdvecteLabel)
//VanLeerTheta (Vazimutal_d, ExtLabel_d, dt);
VanLeerTheta (Vazimutal_d, Dens_d, dt); /* MUST be the last line */
}
__host__ void VanLeerTheta (float *Vazimutal_d, float *Qbase_d, float dt)
{
DivisePolarGrid (Qbase_d, DensInt_d, Work_d);
ComputeStarTheta (Work_d, Vazimutal_d, QRStar_d, dt);
VanLeerThetaKernel<<<dimGrid2, dimBlock2>>>(Rsup_d, Rinf_d, Surf_d, dt, NRAD, NSEC, 0,
NoSplitAdvection_d, QRStar_d, DensStar_d, Vazimutal_d, Qbase_d);
gpuErrchk(cudaDeviceSynchronize());
}
__host__ void ComputeStarTheta (float *Qbase_d, float *Vazimutal_d, float *QStar_d, float dt)
{
//gpuErrchk(cudaMemset(dq_d, 0, size_grid*sizeof(float)));
StarThetaKernel<<<dimGrid2, dimBlock2>>> (Qbase_d, Rmed_d, NRAD, NSEC, dq_d, dt);
gpuErrchk(cudaDeviceSynchronize());
StarThetaKernel2<<<dimGrid2, dimBlock2>>>(Qbase_d, Rmed_d, Vazimutal_d, QStar_d, NRAD, NSEC, dq_d, dt);
gpuErrchk(cudaDeviceSynchronize());
}
__host__ void InitTransport ()
{
RadMomP = (float *)malloc(size_grid*sizeof(float));
RadMomM = (float *)malloc(size_grid*sizeof(float));
ThetaMomP = (float *)malloc(size_grid*sizeof(float));
ThetaMomM = (float *)malloc(size_grid*sizeof(float));
Work = (float *)malloc(size_grid*sizeof(float));
QRStar = (float *)malloc(size_grid*sizeof(float));
ExtLabel = (float *)malloc(size_grid*sizeof(float));
VthetaRes = (float *)malloc(size_grid*sizeof(float));
TempShift = (float *)malloc(size_grid*sizeof(float));
dq = (float *)malloc(size_grid*sizeof(float));
InitTransportDevice();
}
__host__ void InitTransportDevice()
{
gpuErrchk(cudaMalloc((void**)&RadMomP_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&RadMomM_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&ThetaMomP_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&ThetaMomM_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&Work_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&QRStar_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&ExtLabel_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&dq_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&LostByDisk_d, NSEC*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&VthetaRes_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&TempShift_d, size_grid*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&VMed_d, NRAD*sizeof(float)));
gpuErrchk(cudaMalloc((void**)&Nshift_d, NRAD*sizeof(int)));
gpuErrchk(cudaMalloc((void**)&NoSplitAdvection_d, NRAD*sizeof(int)));
gpuErrchk(cudaMemset(RadMomP_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(RadMomM_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(ThetaMomP_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(ThetaMomM_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(Work_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(QRStar_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(ExtLabel_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(dq_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(LostByDisk_d, 0, NSEC*sizeof(float)));
gpuErrchk(cudaMemset(VthetaRes_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(TempShift_d, 0, size_grid*sizeof(float)));
gpuErrchk(cudaMemset(VMed_d, 0, NRAD*sizeof(float)));
gpuErrchk(cudaMemset(Nshift_d, 0, NRAD*sizeof(int)));
gpuErrchk(cudaMemset(NoSplitAdvection_d, 0, NRAD*sizeof(int)));
}
|
1f48443f00ca8bc7decd1e57c2cd0529dfe0fa6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/TensorIterator.h>
#include <aten/src/ATen/TensorUtils.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/Resize.h>
constexpr float EPSILON = 1e-12;
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad)
.add_input(input)
.add_input(target)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
}
} // namespace
namespace at { namespace native {
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) {
auto grad_input = at::empty_like(input);
if (!log_target) {
TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(target)
.add_input(grad)
.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() {
scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0);
gpu_kernel(iter,
[inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) {
return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0);
});
});
}
else {
grad_input = -at::exp(target) * grad;
if (reduction == at::Reduction::Mean) {
grad_input /= input.numel();
}
}
return grad_input;
}
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(
input, target, weight, reduction, loss);
}
Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter = TensorIteratorConfig()
.add_output(loss_squeezed)
.add_owned_input(at::squeeze(input))
.add_owned_input(at::squeeze(target))
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
scalar_t log_input_val = ::log(input_val);
scalar_t log_1_minus_input_val = ::log(one - input_val);
log_input_val = ::max(log_input_val, neg_100);
log_1_minus_input_val = ::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(
grad, input, target, weight, reduction, grad_input);
}
Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
// -----------------------------------
// nll_loss
// -----------------------------------
namespace {
constexpr int NLL_LOSS_THREADS = 32;
#define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \
[&] { \
at::ScalarType _it = TYPE; \
RECORD_KERNEL_FUNCTION_DTYPE(NAME, _it) \
switch (_it) { \
AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Byte, uint8_t, index_t, __VA_ARGS__) \
AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Long, int64_t, index_t, __VA_ARGS__)\
default: \
AT_ERROR(#NAME, " not implemented for '", toString(_it), "'"); \
} \
}()
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_no_reduce_cuda_kernel(
int64_t batch_size,
PackedTensorAccessor64<scalar_t, 2> input,
index_t* target,
scalar_t* output,
scalar_t* weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
output[index] = static_cast<scalar_t>(0);
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
auto cur_weight =
weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
output[index] = -cur_weight * input[index][cur_target];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_1d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int n_classes,
int64_t ignore_index) {
CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto cur_weight = weights != nullptr ? weights[t] : scalar_t{1};
*total_weight = cur_weight;
if (size_average) {
// If we try to normalize a zero then we return a NaN
if (cur_weight == 0) {
*output = std::numeric_limits<scalar_t>::quiet_NaN();
} else {
*output = -input[t];
}
} else {
*output = -cur_weight * input[t];
}
} else {
    // If the only element was omitted, we get 0. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
*output = scalar_t{0};
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_2d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
__shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0);
acc_weight[threadIdx.x] = static_cast<accscalar_t>(0);
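  // each thread accumulates a partial loss and weight over a strided slice of the batch;
  // thread 0 combines the per-thread partials after the barrier below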
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
if (threadIdx.x == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sh_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
void nll_loss_forward_out_cuda_template(
const Tensor& output,
const Tensor& total_weight,
const Tensor& input_,
const Tensor& target_,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto input = *input_.expect_contiguous();
auto target = *target_.expect_contiguous();
int64_t n_classes = input.size(-1);
int64_t n_dims = input.dim();
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == Reduction::None && n_dims == 2) {
at::native::resize_output(output, {batch_size});
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with
// 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel_index",
[&] {
hipLaunchKernelGGL(( nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t>)
, dim3(at::cuda::detail::GET_BLOCKS(batch_size)),
dim3(at::cuda::detail::CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batch_size,
input.packed_accessor64<scalar_t, 2>(),
target.data_ptr<index_t>(),
output.data_ptr<scalar_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return;
}
// produce scalar outputs for the reduction case
at::native::resize_output(output, {});
total_weight.resize_({});
if (target.numel() == 0) {
// Here target (and input) have zero elements
// Mean reduction on empty tensors produces NaN. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
if (reduction == Reduction::Mean) {
output.fill_(std::numeric_limits<double>::quiet_NaN());
} else {
output.zero_();
}
total_weight.zero_();
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d_index",
[&] {
hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t>)
, dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
} else if (n_dims == 2) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d_index",
[&] {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>;
hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t>)
, dim3(1),
dim3(NLL_LOSS_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_no_reduce_cuda_kernel(
int batch_size,
index_t *target,
PackedTensorAccessor64<scalar_t, 1> grad_output,
PackedTensorAccessor64<scalar_t, 2> grad_input,
scalar_t *weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
grad_input[index][cur_target] = -weight * grad_output[index];
}
};
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_1d(
scalar_t *grad_input,
scalar_t *grad_output,
scalar_t *weights,
index_t *target,
scalar_t *total_weight,
bool size_average,
int n_classes,
int64_t ignore_index
) {
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
grad_input[t] = weights != nullptr ? weights[t] * grad
: grad;
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_2d(
scalar_t* grad_input,
scalar_t* grad_output,
index_t* target,
scalar_t* weights,
scalar_t* total_weight,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
grad_input[i * ndim + t] = weights != nullptr ? weights[t] * grad
: grad;
}
}
}
void nll_loss_backward_out_cuda_template(
const Tensor& grad_input_,
const Tensor& grad_output_,
const Tensor& input_,
const Tensor& target_,
const Tensor& total_weight,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto target = *target_.expect_contiguous();
auto input = *input_.expect_contiguous();
auto grad_input = *grad_input_.expect_contiguous();
auto grad_output = *grad_output_.expect_contiguous();
int64_t n_dims = input.dim();
int64_t n_classes = input.size(-1);
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == at::Reduction::None && n_dims == 2) {
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel_index",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t>)
, dim3(at::cuda::detail::GET_BLOCKS(batch_size)),
dim3(at::cuda::detail::CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batch_size,
target.data_ptr<index_t>(),
grad_output.packed_accessor64<scalar_t, 1>(),
grad_input.packed_accessor64<scalar_t, 2>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d_index",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t>)
, dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
target.data_ptr<index_t>(),
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d_index",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t>)
, dim3(1), dim3(NLL_LOSS_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
}
}
#undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES
} // namespace
TORCH_IMPL_FUNC(nll_loss_forward_out_cuda)
(const Tensor& self,
const Tensor& target,
const OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& output,
const Tensor& total_weight) {
const Tensor& weight = weight_opt.getTensorRef();
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight, reduction, ignore_index);
}
TORCH_IMPL_FUNC(nll_loss_backward_out_cuda)
(const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight,
const Tensor& grad_input) {
const Tensor& weight = weight_opt.getTensorRef();
grad_input.zero_();
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight,
reduction,
ignore_index);
}
}} // namespace at::native
|
1f48443f00ca8bc7decd1e57c2cd0529dfe0fa6d.cu
|
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/TensorIterator.h>
#include <aten/src/ATen/TensorUtils.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/Resize.h>
constexpr float EPSILON = 1e-12;
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad)
.add_input(input)
.add_input(target)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
}
} // namespace
namespace at { namespace native {
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) {
auto grad_input = at::empty_like(input);
if (!log_target) {
TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(target)
.add_input(grad)
.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() {
scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0);
gpu_kernel(iter,
[inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) {
return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0);
});
});
}
else {
grad_input = -at::exp(target) * grad;
if (reduction == at::Reduction::Mean) {
grad_input /= input.numel();
}
}
return grad_input;
}
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(
input, target, weight, reduction, loss);
}
Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter = TensorIteratorConfig()
.add_output(loss_squeezed)
.add_owned_input(at::squeeze(input))
.add_owned_input(at::squeeze(target))
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
scalar_t log_input_val = std::log(input_val);
scalar_t log_1_minus_input_val = std::log(one - input_val);
log_input_val = std::max(log_input_val, neg_100);
log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(
grad, input, target, weight, reduction, grad_input);
}
Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
// -----------------------------------
// nll_loss
// -----------------------------------
namespace {
constexpr int NLL_LOSS_THREADS = 32;
#define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \
[&] { \
at::ScalarType _it = TYPE; \
RECORD_KERNEL_FUNCTION_DTYPE(NAME, _it) \
switch (_it) { \
AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Byte, uint8_t, index_t, __VA_ARGS__) \
AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Long, int64_t, index_t, __VA_ARGS__)\
default: \
AT_ERROR(#NAME, " not implemented for '", toString(_it), "'"); \
} \
}()
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_no_reduce_cuda_kernel(
int64_t batch_size,
PackedTensorAccessor64<scalar_t, 2> input,
index_t* target,
scalar_t* output,
scalar_t* weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
output[index] = static_cast<scalar_t>(0);
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
auto cur_weight =
weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
output[index] = -cur_weight * input[index][cur_target];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_1d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int n_classes,
int64_t ignore_index) {
CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto cur_weight = weights != nullptr ? weights[t] : scalar_t{1};
*total_weight = cur_weight;
if (size_average) {
// If we try to normalize a zero then we return a NaN
if (cur_weight == 0) {
*output = std::numeric_limits<scalar_t>::quiet_NaN();
} else {
*output = -input[t];
}
} else {
*output = -cur_weight * input[t];
}
} else {
    // If the only element was omitted, we get 0. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
*output = scalar_t{0};
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_2d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
__shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0);
acc_weight[threadIdx.x] = static_cast<accscalar_t>(0);
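  // each thread accumulates a partial loss and weight over a strided slice of the batch;
  // thread 0 combines the per-thread partials after the barrier below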
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
if (threadIdx.x == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sh_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
void nll_loss_forward_out_cuda_template(
const Tensor& output,
const Tensor& total_weight,
const Tensor& input_,
const Tensor& target_,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto input = *input_.expect_contiguous();
auto target = *target_.expect_contiguous();
int64_t n_classes = input.size(-1);
int64_t n_dims = input.dim();
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == Reduction::None && n_dims == 2) {
at::native::resize_output(output, {batch_size});
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with
// 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel_index",
[&] {
nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t>
<<<at::cuda::detail::GET_BLOCKS(batch_size),
at::cuda::detail::CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
batch_size,
input.packed_accessor64<scalar_t, 2>(),
target.data_ptr<index_t>(),
output.data_ptr<scalar_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return;
}
// produce scalar outputs for the reduction case
at::native::resize_output(output, {});
total_weight.resize_({});
if (target.numel() == 0) {
// Here target (and input) have zero elements
// Mean reduction on empty tensors produces NaN. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
if (reduction == Reduction::Mean) {
output.fill_(std::numeric_limits<double>::quiet_NaN());
} else {
output.zero_();
}
total_weight.zero_();
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d_index",
[&] {
nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t>
<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
} else if (n_dims == 2) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d_index",
[&] {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>;
nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t>
<<<1,
NLL_LOSS_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_no_reduce_cuda_kernel(
int batch_size,
index_t *target,
PackedTensorAccessor64<scalar_t, 1> grad_output,
PackedTensorAccessor64<scalar_t, 2> grad_input,
scalar_t *weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
grad_input[index][cur_target] = -weight * grad_output[index];
}
};
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_1d(
scalar_t *grad_input,
scalar_t *grad_output,
scalar_t *weights,
index_t *target,
scalar_t *total_weight,
bool size_average,
int n_classes,
int64_t ignore_index
) {
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
grad_input[t] = weights != nullptr ? weights[t] * grad
: grad;
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_2d(
scalar_t* grad_input,
scalar_t* grad_output,
index_t* target,
scalar_t* weights,
scalar_t* total_weight,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
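// Launched as a single block of NLL_LOSS_THREADS threads (see the call site below); each thread
// scatters gradients for rows i, i + NLL_LOSS_THREADS, i + 2*NLL_LOSS_THREADS, ...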
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
grad_input[i * ndim + t] = weights != nullptr ? weights[t] * grad
: grad;
}
}
}
void nll_loss_backward_out_cuda_template(
const Tensor& grad_input_,
const Tensor& grad_output_,
const Tensor& input_,
const Tensor& target_,
const Tensor& total_weight,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto target = *target_.expect_contiguous();
auto input = *input_.expect_contiguous();
auto grad_input = *grad_input_.expect_contiguous();
auto grad_output = *grad_output_.expect_contiguous();
int64_t n_dims = input.dim();
int64_t n_classes = input.size(-1);
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == at::Reduction::None && n_dims == 2) {
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel_index",
[&] {
nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t>
<<<at::cuda::detail::GET_BLOCKS(batch_size),
at::cuda::detail::CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
batch_size,
target.data_ptr<index_t>(),
grad_output.packed_accessor64<scalar_t, 1>(),
grad_input.packed_accessor64<scalar_t, 2>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d_index",
[&] {
nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t>
<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
target.data_ptr<index_t>(),
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d_index",
[&] {
nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t>
<<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
}
}
#undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES
} // namespace
TORCH_IMPL_FUNC(nll_loss_forward_out_cuda)
(const Tensor& self,
const Tensor& target,
const OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& output,
const Tensor& total_weight) {
const Tensor& weight = weight_opt.getTensorRef();
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight, reduction, ignore_index);
}
TORCH_IMPL_FUNC(nll_loss_backward_out_cuda)
(const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight,
const Tensor& grad_input) {
const Tensor& weight = weight_opt.getTensorRef();
grad_input.zero_();
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight,
reduction,
ignore_index);
}
}} // namespace at::native
|
4f8b06fa95ce9e3e5f7265b0a85530d67c8c8dd8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUB_STDERR
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pow_op.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
// pow, log and other math functions are defined in
// CUDA math library in header file math.h
#define CUDA_POW(x, y) (pow(x, y))
template <int b_is_scalar, typename T1, typename T2, typename R>
__global__ void PowKernel(const T1* a, const T2* b, T2 e, R* out, int n) {
CUDA_1D_KERNEL_LOOP(i, n) {
out[i] = CUDA_POW(a[i], ((b == NULL) ? e : b[b_is_scalar ? 0 : i]));
}
}
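// PowBroadcastKernel: b holds n exponents that are broadcast over the leading `pre` dimension,
// so output element i uses exponent b[i % n].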
template <typename T1, typename T2, typename R>
__global__ void
PowBroadcastKernel(const T1* a, const T2* b, R* out, int pre, int n) {
CUDA_1D_KERNEL_LOOP(i, pre * n) {
out[i] = CUDA_POW(a[i], b[i % n]);
}
}
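// PowBroadcast2Kernel: b holds n exponents that are broadcast over both the leading `pre` and the
// trailing `post` dimension, so output element i uses exponent b[(i / post) % n].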
template <typename T1, typename T2, typename R>
__global__ void PowBroadcast2Kernel(
const T1* a,
const T2* b,
R* out,
int pre,
int n,
int post) {
CUDA_1D_KERNEL_LOOP(i, pre * n * post) {
out[i] = CUDA_POW(a[i], b[(i / post) % n]);
}
}
struct CudaPowFunctor {
template <bool b_is_scalar, typename T1, typename T2, typename R>
inline void
Run(size_t n, const T1* a, const T2* b, T2 e, R* out, CUDAContext* context) {
hipLaunchKernelGGL(( PowKernel<b_is_scalar, T1, T2, R>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), a, b, e, out, n);
}
template <typename T1, typename T2, typename R>
void RunWithBroadcast(
const T1* a,
const T2* b,
R* out,
size_t pre,
size_t n,
CUDAContext* context) {
hipLaunchKernelGGL(( PowBroadcastKernel<T1, T2, R>)
, dim3(CAFFE_GET_BLOCKS(pre * n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), a, b, out, pre, n);
}
template <typename T1, typename T2, typename R>
void RunWithBroadcast2(
const T1* a,
const T2* b,
R* out,
size_t pre,
size_t n,
size_t post,
CUDAContext* context) {
hipLaunchKernelGGL(( PowBroadcast2Kernel<T1, T2, R>)
, dim3(CAFFE_GET_BLOCKS(pre * n * post)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), a, b, out, pre, n, post);
}
};
REGISTER_CUDA_OPERATOR(
Pow,
PowOp<
TensorTypes<float> /*NumericTypes*/,
CUDAContext,
CudaPowFunctor,
SameTypeAsInput>)
} // namespace caffe2
|
4f8b06fa95ce9e3e5f7265b0a85530d67c8c8dd8.cu
|
#define CUB_STDERR
#include <cub/block/block_load.cuh>
#include <cub/block/block_reduce.cuh>
#include <cub/device/device_reduce.cuh>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pow_op.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
// pow, log and other math functions are defined in
// CUDA math library in header file math.h
#define CUDA_POW(x, y) (pow(x, y))
template <int b_is_scalar, typename T1, typename T2, typename R>
__global__ void PowKernel(const T1* a, const T2* b, T2 e, R* out, int n) {
CUDA_1D_KERNEL_LOOP(i, n) {
out[i] = CUDA_POW(a[i], ((b == NULL) ? e : b[b_is_scalar ? 0 : i]));
}
}
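// PowBroadcastKernel: b holds n exponents that are broadcast over the leading `pre` dimension,
// so output element i uses exponent b[i % n].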
template <typename T1, typename T2, typename R>
__global__ void
PowBroadcastKernel(const T1* a, const T2* b, R* out, int pre, int n) {
CUDA_1D_KERNEL_LOOP(i, pre * n) {
out[i] = CUDA_POW(a[i], b[i % n]);
}
}
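// PowBroadcast2Kernel: b holds n exponents that are broadcast over both the leading `pre` and the
// trailing `post` dimension, so output element i uses exponent b[(i / post) % n].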
template <typename T1, typename T2, typename R>
__global__ void PowBroadcast2Kernel(
const T1* a,
const T2* b,
R* out,
int pre,
int n,
int post) {
CUDA_1D_KERNEL_LOOP(i, pre * n * post) {
out[i] = CUDA_POW(a[i], b[(i / post) % n]);
}
}
struct CudaPowFunctor {
template <bool b_is_scalar, typename T1, typename T2, typename R>
inline void
Run(size_t n, const T1* a, const T2* b, T2 e, R* out, CUDAContext* context) {
PowKernel<b_is_scalar, T1, T2, R>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(a, b, e, out, n);
}
template <typename T1, typename T2, typename R>
void RunWithBroadcast(
const T1* a,
const T2* b,
R* out,
size_t pre,
size_t n,
CUDAContext* context) {
PowBroadcastKernel<T1, T2, R>
<<<CAFFE_GET_BLOCKS(pre * n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(a, b, out, pre, n);
}
template <typename T1, typename T2, typename R>
void RunWithBroadcast2(
const T1* a,
const T2* b,
R* out,
size_t pre,
size_t n,
size_t post,
CUDAContext* context) {
PowBroadcast2Kernel<T1, T2, R>
<<<CAFFE_GET_BLOCKS(pre * n * post),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(a, b, out, pre, n, post);
}
};
REGISTER_CUDA_OPERATOR(
Pow,
PowOp<
TensorTypes<float> /*NumericTypes*/,
CUDAContext,
CudaPowFunctor,
SameTypeAsInput>)
} // namespace caffe2
|
2e4240dd87e0404dd1250ac8ec6fe5e356ff8f73.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017 Madhavan Seshadri
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>
#include "validation.h"
#include "examples/opencl/benchmark_vector/timer.hpp"
//###########################################################################
//Kernels
//###########################################################################
__global__ void dgemm(double *A, double *B, double *C, int m, int n, int k, double alpha, double beta){
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
if(ROW < (m) && COL < (n)){
double sum = 0;
for(int i = 0;i<k;i++)
sum+=(alpha) * A[ROW * (k) + i] * B[i*(n)+COL];
C[ROW*(n)+COL] = sum + (beta) * C[ROW*(n)+COL];
}
}
//###########################################################################
//Main
//###########################################################################
int main(int argc, char*argv[]) {
if (argc != 4) {
std::cout << "Usage: " << argv[0] << " #m #n #k";
exit(1);
}
int m,n,k,i;
//Initializing the matrix dimensions
m = atoi(argv[1]);
n = atoi(argv[2]);
k = atoi(argv[3]);
double time = 0;
timer_start();
double *A, *B, *C;
double *A_dev, *B_dev, *C_dev;
double alpha, beta;
//initializing values of alpha and beta
alpha = 1.0;
beta = 0.0;
/*
* Malloc data on host and device
*/
//Malloc Host
hipHostMalloc((void**) &A, m*k*sizeof( double ));
hipHostMalloc((void**) &B, n*k*sizeof( double ));
hipHostMalloc((void**) &C, m*n*sizeof( double ));
//Malloc Device
hipMalloc((void**) &A_dev, m*k*sizeof( double ));
hipMalloc((void**) &B_dev, n*k*sizeof( double ));
hipMalloc((void**) &C_dev, m*n*sizeof( double ));
time+=timer_stop();
//printf (" Initializing matrix data \n\n");
timer_start();
for (i = 0; i < (m*k); i++) {
A[i] = (double)(i+1);
}
for (i = 0; i < (k*n); i++) {
B[i] = (double)(-i-1);
}
for (i = 0; i < (m*n); i++) {
C[i] = 0.0;
}
dim3 blocksize(32,32);
dim3 gridsize((n + blocksize.x - 1) / blocksize.x, (m + blocksize.y - 1) / blocksize.y); // grid.x covers the n columns (COL), grid.y the m rows (ROW)
/*
* Copy data
*/
hipMemcpy(A_dev, A, m*k*sizeof( double ), hipMemcpyHostToDevice);
hipMemcpy(B_dev, B, n*k*sizeof( double ), hipMemcpyHostToDevice);
hipMemcpy(C_dev, C, m*n*sizeof( double ), hipMemcpyHostToDevice);
/*
* Kernel launch
*/
hipLaunchKernelGGL(( dgemm), dim3(gridsize), dim3(blocksize), 0, 0, A_dev, B_dev, C_dev, m, n, k, alpha, beta);
hipDeviceSynchronize();
/*
* Copy result back
*/
hipMemcpy(C, C_dev, m*n*sizeof( double ), hipMemcpyDeviceToHost);
/*
* Free
*/
hipHostFree(A);
hipHostFree(B);
hipHostFree(C);
hipFree(A_dev);
hipFree(B_dev);
hipFree(C_dev);
//Printing the end timing result
time+=timer_stop();
std::cout << time << " ";
// Validating the result
std::cout << validateDgemm(A, B, C, alpha, beta, n, m, k) << std::endl;
return EXIT_SUCCESS;
}
|
2e4240dd87e0404dd1250ac8ec6fe5e356ff8f73.cu
|
// Copyright (c) 2017 Madhavan Seshadri
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <cuda.h>
#include <iostream>
#include <cmath>
#include "validation.h"
#include "examples/opencl/benchmark_vector/timer.hpp"
//###########################################################################
//Kernels
//###########################################################################
__global__ void dgemm(double *A, double *B, double *C, int m, int n, int k, double alpha, double beta){
int ROW = blockIdx.y*blockDim.y+threadIdx.y;
int COL = blockIdx.x*blockDim.x+threadIdx.x;
if(ROW < (m) && COL < (n)){
double sum = 0;
for(int i = 0;i<k;i++)
sum+=(alpha) * A[ROW * (k) + i] * B[i*(n)+COL];
C[ROW*(n)+COL] = sum + (beta) * C[ROW*(n)+COL];
}
}
//###########################################################################
//Main
//###########################################################################
int main(int argc, char*argv[]) {
if (argc != 4) {
std::cout << "Usage: " << argv[0] << " #m #n #k";
exit(1);
}
int m,n,k,i;
//Initializing the matrix dimensions
m = atoi(argv[1]);
n = atoi(argv[2]);
k = atoi(argv[3]);
double time = 0;
timer_start();
double *A, *B, *C;
double *A_dev, *B_dev, *C_dev;
double alpha, beta;
//initializing values of alpha and beta
alpha = 1.0;
beta = 0.0;
/*
* Malloc data on host and device
*/
//Malloc Host
cudaMallocHost((void**) &A, m*k*sizeof( double ));
cudaMallocHost((void**) &B, n*k*sizeof( double ));
cudaMallocHost((void**) &C, m*n*sizeof( double ));
//Malloc Device
cudaMalloc((void**) &A_dev, m*k*sizeof( double ));
cudaMalloc((void**) &B_dev, n*k*sizeof( double ));
cudaMalloc((void**) &C_dev, m*n*sizeof( double ));
time+=timer_stop();
//printf (" Initializing matrix data \n\n");
timer_start();
for (i = 0; i < (m*k); i++) {
A[i] = (double)(i+1);
}
for (i = 0; i < (k*n); i++) {
B[i] = (double)(-i-1);
}
for (i = 0; i < (m*n); i++) {
C[i] = 0.0;
}
dim3 blocksize(32,32);
dim3 gridsize((n + blocksize.x - 1) / blocksize.x, (m + blocksize.y - 1) / blocksize.y); // grid.x covers the n columns (COL), grid.y the m rows (ROW)
/*
* Copy data
*/
cudaMemcpy(A_dev, A, m*k*sizeof( double ), cudaMemcpyHostToDevice);
cudaMemcpy(B_dev, B, n*k*sizeof( double ), cudaMemcpyHostToDevice);
cudaMemcpy(C_dev, C, m*n*sizeof( double ), cudaMemcpyHostToDevice);
/*
* Kernel launch
*/
dgemm<<<gridsize, blocksize>>>(A_dev, B_dev, C_dev, m, n, k, alpha, beta);
cudaDeviceSynchronize();
/*
* Copy result back
*/
cudaMemcpy(C, C_dev, m*n*sizeof( double ), cudaMemcpyDeviceToHost);
/*
* Free
*/
cudaFreeHost(A);
cudaFreeHost(B);
cudaFreeHost(C);
cudaFree(A_dev);
cudaFree(B_dev);
cudaFree(C_dev);
//Printing the end timing result
time+=timer_stop();
std::cout << time << " ";
// Validating the result
std::cout << validateDgemm(A, B, C, alpha, beta, n, m, k) << std::endl;
return EXIT_SUCCESS;
}
|
4ca7d01b42e1160c77c19f776f640ac5b203dca7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
/**
* KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C.
* Locations are found by computing the global index of each thread.
* @return
*/
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
// 1D global index
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if(offset < N)
{
c[offset] = a[offset] + b[offset];
}
}
/**
* KERNEL cuMult() - Takes two 2D matrices and multiplies them
* @param a - 1st Matrix
* @param b - 2nd Matrix
* @param c - Result Matrix
* @param wA - length of A and depth of B
* @param wB - length of matrix B and C
* @param hA - depth of matrix A and C
*/
__global__ void cuMult(float *a, float *b, float *c, int wA, int wB, int hA)
{
// global index
int gidx = blockDim.x * blockIdx.x + threadIdx.x; // col
int gidy = blockDim.y * blockIdx.y + threadIdx.y; // row
if(gidx < wB && gidy < hA)
{
float sum = 0.f;
for(int k=0; k<wA; k++)
{
// Multiply row of A by column of B
sum += a[gidy*wA + k] * b[k*wB +gidx];
}
c[gidy * wB + gidx] = sum;
}
}
/**
* KERNEL cuMultOpti() - Takes two 2D matrices and multiplies them optimally
* @param a - 1st Matrix
* @param b - 2nd Matrix
* @param c - Result Matrix
* @param wA - length of A and depth of B
* @param wB - length of matrix B and C
* @param hA - depth of matrix A and C
*/
__global__ void cuMultOpti(
float *a,
float *b,
float *c,
int wA,
int wB,
int hA)
{
#define blockTile 16
/* Blocksize is 16x16 */
/* Allocate shared memory */
__shared__ float aBlock[blockTile][blockTile];
__shared__ float bBlock[blockTile][blockTile];
/* Calculate global index X, Y*/
int gx = blockDim.x * blockIdx.x + threadIdx.x; // column
int gy = blockDim.y * blockIdx.y + threadIdx.y; // row
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
/* Compute offset idx for A & B */
// First A index (row shift) BlockRow*BlockWidth*Width-A
int a0 = wA * 16 * by;
// aBegin -> last element in row -> + width - 1
int aZ = a0 + wA - 1;
// Column block iteration = blockDim.x
int aD = 16;
// b_0 -> Column Shift
int b0 = 16 * bx;
// Row block iteration = blockDim.y * width B
int bD = 16 * wB;
float sum = 0.f;
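/* Tile loop: aI walks right across the 16-row block of A in steps of 16 columns while bI walks
   down the 16-column block of B in steps of 16*wB elements; each iteration caches one 16x16 tile
   of A and one of B in shared memory and accumulates their product into sum. */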
for(int aI = a0, bI = b0; aI <= aZ; aI += aD, bI += bD)
{
/* Assign shared memory and sync */
/* Warning, wA*gidy may be out of bounds */
aBlock[ty][tx] = a[aI + ty*wA + tx];
bBlock[ty][tx] = b[bI + ty*wB + tx];
/* Make sure all of the threads have cached the memory */
__syncthreads();
/* Sum over NK */
for(int k=0; k < 16; k++)
{
/* C = (A x B) */
sum += aBlock[ty][k] * bBlock[k][tx];
}
/* Sync again before the next iteration overwrites the shared tiles, so no thread
   clobbers data that another thread is still reading */
__syncthreads();
}
c[gy*wB + gx] = sum;
//c[i * NJ + j] = ALPHA*sum + BETA*c[i * NJ + j];
}
/**
* HOST h_MatrixMult_Naive() - Takes two 2D matrices and multiplies them naively
* @param a wA.hA - 1st Matrix
* @param b wB.wA - 2nd Matrix
* @param c hA.wB - Result Matrix
* @param wA - length of A and depth of B
* @param wB - length of matrix B and C
* @param hA - depth of matrix A and C
*/
void h_MatrixMult_Naive(
float *a,
float *b,
float *c,
int wA,
int wB,
int hA)
{
// Iterate through all rows of a
for(int i=0; i<hA; i++)
{
// Iterate through all columns of b
for(int j=0; j<wB; j++)
{
// Calculate all of c[i][j] products
int sum = 0;
for(int k=0; k<wA; k++)
{
sum += a[i*wA + k] * b[k*wB + j];
}
assert(i*wB + j < hA*wB);
// Index - row i of column j with column width of wB
c[i * wB + j] = sum;
}
}
}
double microSeconds()
{
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, NULL);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
/**
* ENTRY main() - Tests <<<>>>cuMult() kernel: Initializes memory and data on
* the host, then memory on the device. Copies the data from host to device,
* executes kernel with memory device pointers, copies result back to host,
* displays results for error checking and frees allocated memory.
* @return
*/
int main(int argc, char ** argv)
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printf("Device %d (%s) has compute capability %d.%d.\nWarp Size: %d",
device, deviceProp.name, deviceProp.major, deviceProp.minor, deviceProp.warpSize);
}
return 0;
//
// // width A
// int wA = 512;
// // height A
// int hA = 512;
//
// // width B
// int wB = 512;
// // height B
// int hB = wA;
//
// // value A
// float aValue = 1.0;
// // value B
// float bValue = 2.0;
//
// /* Fetch the test parameters */
// if(argc < 6)
// {
// printf("Using default parameters: 320 640 320 1 2\n");
// }
// else
// {
// wA = atoi(argv[1]);
// hA = atoi(argv[2]);
// wB = atoi(argv[3]);
// hB = wA;
// aValue = atoi(argv[4]);
// bValue = atoi(argv[5]);
// }
// /**
// * Neutral - both for host and device */
//
// int wC = wB;
// int hC = hA;
//
// size_t size_a = sizeof(float) * wA * hA;
// size_t size_b = sizeof(float) * wB * hB;
// size_t size_c = sizeof(float) * wC * hC;
//
//
// // host
// float *a, *b, *c, *hh_c;
// a = (float *) malloc(size_a);
// b = (float *) malloc(size_b);
// c = (float *) malloc(size_c);
// /* Host test memory */
// hh_c = (float *) malloc(size_c);
//
// assert(hh_c != NULL);
//
// /**
// * Device specific */
//
// // device
// float *_a, *_b, *_c;
// hipMalloc( (void **) &_a, size_a );
// hipMalloc( (void **) &_b, size_b );
// hipMalloc( (void **) &_c, size_c );
//
// /**
// Neutral */
// // initialize A
// for(int i=0; i < hA * wA; i++)
// {
// a[i] = aValue;
// }
//
// // initialize B
// for(int i=0; i < hB * wB; i++)
// {
// b[i] = bValue;
// }
//
// /**
// Device*/
//
// // copy data to GPU
// hipMemcpy(_a, a, size_a, hipMemcpyHostToDevice);
// hipMemcpy(_b, b, size_b, hipMemcpyHostToDevice);
//
// // x : col , y: row
// dim3 blockSize(16,16);
// // (N.x + blockSize.x - 1)/blockSize.x, (N.y + blockSize.y -1)/blockSize.y)
// dim3 gridSize((wC+15)/16, (hC+15)/16);
//
// hipError_t error;
//
// hipEvent_t start, stop;
//
// ///////////////////////////////////////////////////
// // OPTIMIZED
//
// error = hipEventCreate(&start);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to create start event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// error = hipEventCreate(&stop);
//
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to create stop event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// // Record the start event
// error = hipEventRecord(start, NULL);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to record start event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
//
// // kernel execution
// cuMult<<< gridSize, blockSize >>>(_a, _b, _c, wA, wB, hA);
//
// // Record the stop event
// error = hipEventRecord(stop, NULL);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to record stop event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// // Wait for the stop event to complete
// error = hipEventSynchronize(stop);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// float sgemm_msec = 0.f;
// error = hipEventElapsedTime(&sgemm_msec, start, stop);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
//
//
// // C := alpha*op( A )*op( B ) + beta*C
// // GEMM performs 4 floating point operations for one data output
// //double flops_sgemm = 4.0 * (double) NI * (double) NJ * (double) NK;
//
// //double gigaFlops = (flops_sgemm * 1.0e-9f) / (sgemm_msec / 1000.f);
//
// printf("%.4f\t", sgemm_msec);
//// printf("N_Time: %.3f\n, WorkgroupSize= %u threads/block\n",
//// //gigaFlops,
//// sgemm_msec,
//// //flops_sgemm,
//// blockSize.x * blockSize.y);
//
//
// // copy data back to CPU
// hipMemcpy(c, _c, size_c, hipMemcpyDeviceToHost);
//
// /////////////////////////////////////////////////
// // OPTIMIZED
//
// error = hipEventCreate(&start);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to create start event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// error = hipEventCreate(&stop);
//
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to create stop event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// // Record the start event
// error = hipEventRecord(start, NULL);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to record start event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
//
// // kernel execution
// cuMultOpti<<< gridSize, blockSize >>>(_a, _b, _c, wA, wB, hA);
//
// // Record the stop event
// error = hipEventRecord(stop, NULL);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to record stop event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// // Wait for the stop event to complete
// error = hipEventSynchronize(stop);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// sgemm_msec = 0.f;
// error = hipEventElapsedTime(&sgemm_msec, start, stop);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n",
// hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
//// printf("O_Time: %.3f\nWorkgroupSize= %u threads/block\n",
//// //gigaFlops,
//// sgemm_msec,
//// //flops_sgemm,
//// blockSize.x * blockSize.y);
// printf("%.4f\t", sgemm_msec);
//
//
//
// // copy data back to CPU
// hipMemcpy(c, _c, size_c, hipMemcpyDeviceToHost);
//
// //////////////////////////////////////////////////
// // HOST
//
// // compare with cpu results
// /**
// Host*/
// double h_start, h_end;
// h_start = microSeconds();
// h_MatrixMult_Naive(a, b, hh_c, wA, wB, hA);
// h_end = microSeconds();
//
// printf("%4.4f\t", (h_end - h_start) * 1000);
//
// // Check first and last memory location
// //printf("Start: %d. Finish: %d.\n",c[2], c[wC * hC - 1]);
//
// /* Check */
//// // Naive check
//// int k = 0;
//// while(c[k] == c[k+1])
//// k++;
//// printf("EQ Test: Breakpoint @ %d\n",k);
//
// int fail = 0;
// for( int k = 0; k< wB*hA; k++)
// {
// if(abs(c[k] - hh_c[k]) > 1e-5)
// fail++;
// }
// printf("\nWorkgroup: %d Data: %d Failures: %d\n", blockSize.x*blockSize.y, wC, fail);
//
// // release resources
// hipFree(_a);
// hipFree(_b);
// hipFree(_c);
//
// free(a);
// free(b);
// free(c);
// free(hh_c);
//
// return 0;
}
|
4ca7d01b42e1160c77c19f776f640ac5b203dca7.cu
|
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
/**
* KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C.
* Locations are found by computing the global index of each thread.
* @return
*/
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
// 1D global index
int offset = blockDim.x * blockIdx.x + threadIdx.x;
if(offset < N)
{
c[offset] = a[offset] + b[offset];
}
}
/**
* KERNEL cuMult() - Takes two 2D matrices and multiplies them
* @param a - 1st Matrix
* @param b - 2nd Matrix
* @param c - Result Matrix
* @param wA - length of A and depth of B
* @param wB - length of matrix B and C
* @param hA - depth of matrix A and C
*/
__global__ void cuMult(float *a, float *b, float *c, int wA, int wB, int hA)
{
// global index
int gidx = blockDim.x * blockIdx.x + threadIdx.x; // col
int gidy = blockDim.y * blockIdx.y + threadIdx.y; // row
if(gidx < wB && gidy < hA)
{
float sum = 0.f;
for(int k=0; k<wA; k++)
{
// Multiply row of A by column of B
sum += a[gidy*wA + k] * b[k*wB +gidx];
}
c[gidy * wB + gidx] = sum;
}
}
/**
* KERNEL cuMultOpti() - Takes two 2D matrices and multiplies them optimally
* @param a - 1st Matrix
* @param b - 2nd Matrix
* @param c - Result Matrix
* @param wA - length of A and depth of B
* @param wB - length of matrix B and C
* @param hA - depth of matrix A and C
*/
__global__ void cuMultOpti(
float *a,
float *b,
float *c,
int wA,
int wB,
int hA)
{
#define blockTile 16
/* Blocksize is 16x16 */
/* Allocate shared memory */
__shared__ float aBlock[blockTile][blockTile];
__shared__ float bBlock[blockTile][blockTile];
/* Calculate global index X, Y*/
int gx = blockDim.x * blockIdx.x + threadIdx.x; // column
int gy = blockDim.y * blockIdx.y + threadIdx.y; // row
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
/* Compute offset idx for A & B */
// First A index (row shift) BlockRow*BlockWidth*Width-A
int a0 = wA * 16 * by;
// aBegin -> last element in row -> + width - 1
int aZ = a0 + wA - 1;
// Column block iteration = blockDim.x
int aD = 16;
// b_0 -> Column Shift
int b0 = 16 * bx;
// Row block iteration = blockDim.y * width B
int bD = 16 * wB;
float sum = 0.f;
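/* Tile loop: aI walks right across the 16-row block of A in steps of 16 columns while bI walks
   down the 16-column block of B in steps of 16*wB elements; each iteration caches one 16x16 tile
   of A and one of B in shared memory and accumulates their product into sum. */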
for(int aI = a0, bI = b0; aI <= aZ; aI += aD, bI += bD)
{
/* Assign shared memory and sync */
/* Warning, wA*gidy may be out of bounds */
aBlock[ty][tx] = a[aI + ty*wA + tx];
bBlock[ty][tx] = b[bI + ty*wB + tx];
/* Make sure all of the threads have cached the memory */
__syncthreads();
/* Sum over NK */
for(int k=0; k < 16; k++)
{
/* C = (A x B) */
sum += aBlock[ty][k] * bBlock[k][tx];
}
/* Sync again before the next iteration overwrites the shared tiles, so no thread
   clobbers data that another thread is still reading */
__syncthreads();
}
c[gy*wB + gx] = sum;
//c[i * NJ + j] = ALPHA*sum + BETA*c[i * NJ + j];
}
/**
* HOST h_MatrixMult_Naive() - Takes two 2D matrices and multiplies them naively
* @param a wA.hA - 1st Matrix
* @param b wB.wA - 2nd Matrix
* @param c hA.wB - Result Matrix
* @param wA - length of A and depth of B
* @param wB - length of matrix B and C
* @param hA - depth of matrix A and C
*/
void h_MatrixMult_Naive(
float *a,
float *b,
float *c,
int wA,
int wB,
int hA)
{
// Iterate through all rows of a
for(int i=0; i<hA; i++)
{
// Iterate through all columns of b
for(int j=0; j<wB; j++)
{
// Calculate all of c[i][j] products
int sum = 0;
for(int k=0; k<wA; k++)
{
sum += a[i*wA + k] * b[k*wB + j];
}
assert(i*wB + j < hA*wB);
// Index - row i of column j with column width of wB
c[i * wB + j] = sum;
}
}
}
double microSeconds()
{
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, NULL);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
/**
* ENTRY main() - Tests <<<>>>cuMult() kernel: Initializes memory and data on
* the host, then memory on the device. Copies the data from host to device,
* executes kernel with memory device pointers, copies result back to host,
* displays results for error checking and frees allocated memory.
* @return
*/
int main(int argc, char ** argv)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printf("Device %d (%s) has compute capability %d.%d.\nWarp Size: %d",
device, deviceProp.name, deviceProp.major, deviceProp.minor, deviceProp.warpSize);
}
return 0;
//
// // width A
// int wA = 512;
// // height A
// int hA = 512;
//
// // width B
// int wB = 512;
// // height B
// int hB = wA;
//
// // value A
// float aValue = 1.0;
// // value B
// float bValue = 2.0;
//
// /* Fetch the test parameters */
// if(argc < 6)
// {
// printf("Using default parameters: 320 640 320 1 2\n");
// }
// else
// {
// wA = atoi(argv[1]);
// hA = atoi(argv[2]);
// wB = atoi(argv[3]);
// hB = wA;
// aValue = atoi(argv[4]);
// bValue = atoi(argv[5]);
// }
// /**
// * Neutral - both for host and device */
//
// int wC = wB;
// int hC = hA;
//
// size_t size_a = sizeof(float) * wA * hA;
// size_t size_b = sizeof(float) * wB * hB;
// size_t size_c = sizeof(float) * wC * hC;
//
//
// // host
// float *a, *b, *c, *hh_c;
// a = (float *) malloc(size_a);
// b = (float *) malloc(size_b);
// c = (float *) malloc(size_c);
// /* Host test memory */
// hh_c = (float *) malloc(size_c);
//
// assert(hh_c != NULL);
//
// /**
// * Device specific */
//
// // device
// float *_a, *_b, *_c;
// cudaMalloc( (void **) &_a, size_a );
// cudaMalloc( (void **) &_b, size_b );
// cudaMalloc( (void **) &_c, size_c );
//
// /**
// Neutral */
// // initialize A
// for(int i=0; i < hA * wA; i++)
// {
// a[i] = aValue;
// }
//
// // initialize B
// for(int i=0; i < hB * wB; i++)
// {
// b[i] = bValue;
// }
//
// /**
// Device*/
//
// // copy data to GPU
// cudaMemcpy(_a, a, size_a, cudaMemcpyHostToDevice);
// cudaMemcpy(_b, b, size_b, cudaMemcpyHostToDevice);
//
// // x : col , y: row
// dim3 blockSize(16,16);
// // (N.x + blockSize.x - 1)/blockSize.x, (N.y + blockSize.y -1)/blockSize.y)
// dim3 gridSize((wC+15)/16, (hC+15)/16);
//
// cudaError_t error;
//
// cudaEvent_t start, stop;
//
// ///////////////////////////////////////////////////
// // OPTIMIZED
//
// error = cudaEventCreate(&start);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create start event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// error = cudaEventCreate(&stop);
//
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create stop event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// // Record the start event
// error = cudaEventRecord(start, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record start event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
//
// // kernel execution
// cuMult<<< gridSize, blockSize >>>(_a, _b, _c, wA, wB, hA);
//
// // Record the stop event
// error = cudaEventRecord(stop, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record stop event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// // Wait for the stop event to complete
// error = cudaEventSynchronize(stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// float sgemm_msec = 0.f;
// error = cudaEventElapsedTime(&sgemm_msec, start, stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
//
//
// // C := alpha*op( A )*op( B ) + beta*C
// // GEMM performs 4 floating point operations for one data output
// //double flops_sgemm = 4.0 * (double) NI * (double) NJ * (double) NK;
//
// //double gigaFlops = (flops_sgemm * 1.0e-9f) / (sgemm_msec / 1000.f);
//
// printf("%.4f\t", sgemm_msec);
//// printf("N_Time: %.3f\n, WorkgroupSize= %u threads/block\n",
//// //gigaFlops,
//// sgemm_msec,
//// //flops_sgemm,
//// blockSize.x * blockSize.y);
//
//
// // copy data back to CPU
// cudaMemcpy(c, _c, size_c, cudaMemcpyDeviceToHost);
//
// /////////////////////////////////////////////////
// // OPTIMIZED
//
// error = cudaEventCreate(&start);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create start event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// error = cudaEventCreate(&stop);
//
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create stop event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// // Record the start event
// error = cudaEventRecord(start, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record start event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
//
// // kernel execution
// cuMultOpti<<< gridSize, blockSize >>>(_a, _b, _c, wA, wB, hA);
//
// // Record the stop event
// error = cudaEventRecord(stop, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record stop event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// // Wait for the stop event to complete
// error = cudaEventSynchronize(stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
// sgemm_msec = 0.f;
// error = cudaEventElapsedTime(&sgemm_msec, start, stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n",
// cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
//
//// printf("O_Time: %.3f\nWorkgroupSize= %u threads/block\n",
//// //gigaFlops,
//// sgemm_msec,
//// //flops_sgemm,
//// blockSize.x * blockSize.y);
// printf("%.4f\t", sgemm_msec);
//
//
//
// // copy data back to CPU
// cudaMemcpy(c, _c, size_c, cudaMemcpyDeviceToHost);
//
// //////////////////////////////////////////////////
// // HOST
//
// // compare with cpu results
// /**
// Host*/
// double h_start, h_end;
// h_start = microSeconds();
// h_MatrixMult_Naive(a, b, hh_c, wA, wB, hA);
// h_end = microSeconds();
//
// printf("%4.4f\t", (h_end - h_start) * 1000);
//
// // Check first and last memory location
// //printf("Start: %d. Finish: %d.\n",c[2], c[wC * hC - 1]);
//
// /* Check */
//// // Naive check
//// int k = 0;
//// while(c[k] == c[k+1])
//// k++;
//// printf("EQ Test: Breakpoint @ %d\n",k);
//
// int fail = 0;
// for( int k = 0; k< wB*hA; k++)
// {
// if(abs(c[k] - hh_c[k]) > 1e-5)
// fail++;
// }
// printf("\nWorkgroup: %d Data: %d Failures: %d\n", blockSize.x*blockSize.y, wC, fail);
//
// // release resources
// cudaFree(_a);
// cudaFree(_b);
// cudaFree(_c);
//
// free(a);
// free(b);
// free(c);
// free(hh_c);
//
// return 0;
}
|
ab65897117685bd856d62226d3ab3e4dadb3499c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This program takes the convolution of a given matrix by running the convolution filter
* The filter is of size 3x3 and is hardcoded.
* The program effectively takes care of padding.
* Performance for a 512x512 input matrix came best for shared memory of 16K (total 48K).
* The output of the program is re-verfied using MATLAB and using DFT properties.
* Implemented in CUDA
*
*
*
* code by Anand Goyal. Dated : 12/13/2014
*/
#include<stdio.h>
#include<cuda.h>
#include<time.h>
#include<sys/time.h>
#define NROWS 512
#define NCOLS 512
#define FILTER_ROWS 3
#define FILTER_COLS 3
__global__ void convKernel(int *inData, int *filter, int dataCol, int dataRow, int filRowRad, int filColRad,
int *outData)
{
// 16 KB of ints: a 16*1024-int buffer (64 KB) would exceed the 48 KB static shared-memory limit
__shared__ int padRect[4*1024];
int i, col, row, sum = 0;
int globalCol = threadIdx.x + blockIdx.x * blockDim.x;
int globalRow = threadIdx.y + blockIdx.y * blockDim.y;
int globalIdx = globalCol * dataRow + globalRow;
int localIdx = threadIdx.x * blockDim.y + threadIdx.y;
int localCells = blockDim.x * blockDim.y;
int padRectCol = threadIdx.x + filColRad;
int padRectRow = threadIdx.y + filRowRad;
int padRectOffset = 2*filRowRad + blockDim.y;
int padRectCells = padRectOffset * (blockDim.x + 2*filColRad);
int *padRectOut = (int*)&padRect[((padRectCells-1)/32 + 1) * 32]; //Padding up with 32
padRectOut[localIdx] = 0;
int filOffset = filRowRad*2 + 1;
int filCells = filOffset * (filColRad*2 + 1);
int *localFilter = (int *)&padRectOut[((localCells-1)/32 + 1) * 32]; //Padding up with 32
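// The single shared buffer is carved into three regions, each start rounded up to a multiple of 32 ints:
// padRect     -> the input tile plus a filter-radius halo on every side (padRectCells ints)
// padRectOut  -> one output value per thread (blockDim.x * blockDim.y ints)
// localFilter -> the filter coefficients (filCells ints)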
// Copying the filter elements to shared memory
for(i = 0; i < (filCells/localCells) + 1; i++) {
int index = i*localCells + localIdx;
if(index < filCells) {
localFilter[index] = filter[index];
}
}
// Copying the Data elements to padded shared memory
for(i = 0; i < (padRectCells/localCells) + 1; i++) {
int index = i*localCells + localIdx;
if(index < padRectCells) {
int prCol = index / padRectOffset;
int prRow = index % padRectOffset;
int glCol = prCol + blockIdx.x*blockDim.x - filColRad;
int glRow = prRow + blockIdx.y*blockDim.y - filRowRad;
int glIdx = glCol * dataRow + glRow;
if(glRow >= 0 && glRow < dataRow && glCol >= 0 && glCol < dataCol)
padRect[index] = inData[glIdx];
else
padRect[index] = 0;
}
}
__syncthreads();
//Taking Convolution
for(col = -filColRad; col <= filColRad; col++) {
for(row = -filRowRad; row <= filRowRad; row++) {
int filCol = filColRad - col;
int filRow = filRowRad - row;
int filIdx = filCol*filOffset + filRow;
int filVal = localFilter[filIdx];
int prCol = padRectCol + col;
int prRow = padRectRow + row;
int prIdx = prCol*padRectOffset + prRow;
sum += filVal * padRect[prIdx];
}
}
padRectOut[localIdx] = sum;
__syncthreads();
outData[globalIdx] = padRectOut[localIdx];
}
int main()
{
int *input, *output, *filter;
int *d_input, *d_output, *d_filter;
int i, j;
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start, 0);
hipEventCreate(&stop, 0);
input = (int *)malloc(sizeof(int) * NROWS*NCOLS);
output = (int *)malloc(sizeof(int) * NROWS*NCOLS);
filter = (int *)malloc(sizeof(int) * FILTER_ROWS*FILTER_COLS);
hipMalloc((void **)&d_input, sizeof(int) * NROWS*NCOLS);
hipMalloc((void **)&d_output, sizeof(int) * NROWS*NCOLS);
hipMalloc((void **)&d_filter, sizeof(int) * FILTER_ROWS*FILTER_COLS);
dim3 numOfThreads(16, 16, 1);
dim3 numOfBlocks(NROWS/16, NCOLS/16, 1);
//Populating Input Matrix
for(i = 0; i < NROWS; i++)
for(j = 0; j < NCOLS; j++)
input[i*NCOLS + j] = rand()%20 - 10;
//Populating Filter Matrix
filter[0] = -1; filter[1] = 0; filter[2] = 1;
filter[3] = -2; filter[4] = 0; filter[5] = 2;
filter[6] = -1; filter[7] = 0; filter[8] = 1;
hipEventRecord(start, 0);
hipMemcpy(d_input, input, sizeof(int)*NROWS*NCOLS, hipMemcpyHostToDevice);
hipMemcpy(d_filter, filter, sizeof(int)*FILTER_ROWS*FILTER_COLS, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( convKernel), dim3(numOfBlocks), dim3(numOfThreads), 0, 0, d_input, d_filter, NROWS, NCOLS, FILTER_ROWS/2, FILTER_COLS/2,
d_output);
hipMemcpy(output, d_output, sizeof(int) * NROWS*NCOLS, hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
/* printf("After Convolution :\n");
for(i = 0; i < NROWS; i++) {
for(j = 0; j < NCOLS; j++)
printf("%d ",output[i*NCOLS + j]);
printf("\n");
}
*/
printf("Time : %3.1f ms \n", elapsedTime);
hipFree(d_input);
hipFree(d_output);
hipFree(d_filter);
free(input);
free(output);
free(filter);
return 0;
}
|
ab65897117685bd856d62226d3ab3e4dadb3499c.cu
|
/* This program takes the convolution of a given matrix by running the convolution filter
* The filter is of size 3x3 and is hardcoded.
* The program effectively takes care of padding.
* Performance for a 512x512 input matrix came best for shared memory of 16K (total 48K).
* The output of the program is re-verfied using MATLAB and using DFT properties.
* Implemented in CUDA
*
*
*
* code by Anand Goyal. Dated : 12/13/2014
*/
#include<stdio.h>
#include<cuda.h>
#include<time.h>
#include<sys/time.h>
#define NROWS 512
#define NCOLS 512
#define FILTER_ROWS 3
#define FILTER_COLS 3
__global__ void convKernel(int *inData, int *filter, int dataCol, int dataRow, int filRowRad, int filColRad,
int *outData)
{
// 16 KB of ints: a 16*1024-int buffer (64 KB) would exceed the 48 KB static shared-memory limit
__shared__ int padRect[4*1024];
int i, col, row, sum = 0;
int globalCol = threadIdx.x + blockIdx.x * blockDim.x;
int globalRow = threadIdx.y + blockIdx.y * blockDim.y;
int globalIdx = globalCol * dataRow + globalRow;
int localIdx = threadIdx.x * blockDim.y + threadIdx.y;
int localCells = blockDim.x * blockDim.y;
int padRectCol = threadIdx.x + filColRad;
int padRectRow = threadIdx.y + filRowRad;
int padRectOffset = 2*filRowRad + blockDim.y;
int padRectCells = padRectOffset * (blockDim.x + 2*filColRad);
int *padRectOut = (int*)&padRect[((padRectCells-1)/32 + 1) * 32]; //Padding up with 32
padRectOut[localIdx] = 0;
int filOffset = filRowRad*2 + 1;
int filCells = filOffset * (filColRad*2 + 1);
int *localFilter = (int *)&padRectOut[((localCells-1)/32 + 1) * 32]; //Padding up with 32
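// The single shared buffer is carved into three regions, each start rounded up to a multiple of 32 ints:
// padRect     -> the input tile plus a filter-radius halo on every side (padRectCells ints)
// padRectOut  -> one output value per thread (blockDim.x * blockDim.y ints)
// localFilter -> the filter coefficients (filCells ints)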
// Copying the filter elements to shared memory
for(i = 0; i < (filCells/localCells) + 1; i++) {
int index = i*localCells + localIdx;
if(index < filCells) {
localFilter[index] = filter[index];
}
}
// Copying the Data elements to padded shared memory
for(i = 0; i < (padRectCells/localCells) + 1; i++) {
int index = i*localCells + localIdx;
if(index < padRectCells) {
int prCol = index / padRectOffset;
int prRow = index % padRectOffset;
int glCol = prCol + blockIdx.x*blockDim.x - filColRad;
int glRow = prRow + blockIdx.y*blockDim.y - filRowRad;
int glIdx = glCol * dataRow + glRow;
if(glRow >= 0 && glRow < dataRow && glCol >= 0 && glCol < dataCol)
padRect[index] = inData[glIdx];
else
padRect[index] = 0;
}
}
__syncthreads();
//Taking Convolution
for(col = -filColRad; col <= filColRad; col++) {
for(row = -filRowRad; row <= filRowRad; row++) {
int filCol = filColRad - col;
int filRow = filRowRad - row;
int filIdx = filCol*filOffset + filRow;
int filVal = localFilter[filIdx];
int prCol = padRectCol + col;
int prRow = padRectRow + row;
int prIdx = prCol*padRectOffset + prRow;
sum += filVal * padRect[prIdx];
}
}
padRectOut[localIdx] = sum;
__syncthreads();
outData[globalIdx] = padRectOut[localIdx];
}
int main()
{
int *input, *output, *filter;
int *d_input, *d_output, *d_filter;
int i, j;
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start, 0);
cudaEventCreate(&stop, 0);
input = (int *)malloc(sizeof(int) * NROWS*NCOLS);
output = (int *)malloc(sizeof(int) * NROWS*NCOLS);
filter = (int *)malloc(sizeof(int) * FILTER_ROWS*FILTER_COLS);
cudaMalloc((void **)&d_input, sizeof(int) * NROWS*NCOLS);
cudaMalloc((void **)&d_output, sizeof(int) * NROWS*NCOLS);
cudaMalloc((void **)&d_filter, sizeof(int) * FILTER_ROWS*FILTER_COLS);
dim3 numOfThreads(16, 16, 1);
dim3 numOfBlocks(NROWS/16, NCOLS/16, 1);
//Populating Input Matrix
for(i = 0; i < NROWS; i++)
for(j = 0; j < NCOLS; j++)
input[i*NCOLS + j] = rand()%20 - 10;
//Populating Filter Matrix
filter[0] = -1; filter[1] = 0; filter[2] = 1;
filter[3] = -2; filter[4] = 0; filter[5] = 2;
filter[6] = -1; filter[7] = 0; filter[8] = 1;
cudaEventRecord(start, 0);
cudaMemcpy(d_input, input, sizeof(int)*NROWS*NCOLS, cudaMemcpyHostToDevice);
cudaMemcpy(d_filter, filter, sizeof(int)*FILTER_ROWS*FILTER_COLS, cudaMemcpyHostToDevice);
convKernel<<<numOfBlocks, numOfThreads>>>(d_input, d_filter, NROWS, NCOLS, FILTER_ROWS/2, FILTER_COLS/2,
d_output);
cudaMemcpy(output, d_output, sizeof(int) * NROWS*NCOLS, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
/* printf("After Convolution :\n");
for(i = 0; i < NROWS; i++) {
for(j = 0; j < NCOLS; j++)
printf("%d ",output[i*NCOLS + j]);
printf("\n");
}
*/
printf("Time : %3.1f ms \n", elapsedTime);
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_filter);
free(input);
free(output);
free(filter);
return 0;
}
|
b4267aec9bb2cbb7f836899937c05c9b880a88b5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fstream>
#include <iostream>
#include <string>
#include <clocale>
#include <cctype>
#define HASH_SIZE 20
#define PADDING_CHAR '\0'
//Macro adapted from NVIDIA samples: printf changed to cout
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
cout << "Error at " << __FILE__ << " line: " << __LINE__ << endl; \
return EXIT_FAILURE;}} while(0)
using namespace std;
// Define a minimum helper in case CUDA/C++ does not provide one for the GPU
__device__ unsigned short my_strlen(char *s)
{
unsigned short len = 0;
while(len < HASH_SIZE && s[len] != PADDING_CHAR) { len++; }
return len;
}
__device__ unsigned short minimo(unsigned short a, unsigned short b){
if(a > b)
return b;
else
return a;
}
__device__ unsigned short lev_dist(char *s1, char *s2){
unsigned short l1, l2, i, j, c, res, w;
l1 = my_strlen(s1);
l2 = my_strlen(s2);
// Check that there is something to compare
if (l1 == 0) return(l2);
if (l2 == 0) return(l1);
w = l1 + 1;
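// The (l1+1) x (l2+1) DP matrix is stored column-major with row width w = l1 + 1; the fixed-size
// array below is dimensioned for the worst case of two HASH_SIZE-character strings.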
// DP matrix in a fixed-size local array: m[i,j] = m[j*w+i] !!
unsigned short m[((HASH_SIZE+1)*HASH_SIZE+1) + HASH_SIZE+1];
// Fill the first row and first column
for (i = 0; i <= l1; i++) m[i] = i;
for (j = 0; j <= l2; j++) m[j*w] = j;
// Walk the rest of the matrix, filling in the edit costs
for (i = 1; i <= l1; i++){
for (j = 1; j <= l2; j++)
{
if (s1[i-1] == s2[j-1])
c = 0;
else
c = 1;
m[j*w+i] = minimo(minimo(m[j*w+i-1]+1, m[(j-1)*w+i]+1), m[(j-1)*w+i-1]+c);
}
}
// Return the bottom-right corner of the matrix
res = m[l2*w+l1];
return(res);
}
/*----------------------------------------------------------------------------
----------------------------------------------------------------------------
----------------------- GLOBAL FUNCTIONS: KERNELS ------------------------
----------------------------------------------------------------------------
----------------------------------------------------------------------------*/
__global__ void k_setupPadding(char *first_word, unsigned int total_entradas, char _padd = (char)PADDING_CHAR){
unsigned int idx = threadIdx.x + (blockDim.x * blockIdx.x);
unsigned int stride = blockDim.x * gridDim.x;
while(idx < total_entradas){ // Here we address character by character
for(unsigned char _off = 0; _off < HASH_SIZE; _off++){
first_word[(idx * HASH_SIZE) + _off] = _padd;
}
idx += stride;
}
}
__global__ void k_levenshtein(char *str, char *first_word, unsigned int total_entradas, unsigned int *out_idx, unsigned short *out_dist)
{
char local_str[HASH_SIZE];
// Bottleneck
for(unsigned char i = 0; i < HASH_SIZE; i++) {
local_str[i] = str[i];
}
// To be copied back to the CPU
unsigned short local_min = 0xFFFF;
unsigned int min_idx = 0;
// Limits and indices
unsigned int idx = threadIdx.x + (blockDim.x * blockIdx.x); // Index for stride 0
unsigned int stride = blockDim.x * gridDim.x;
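// Each thread scans dictionary entries idx, idx + stride, idx + 2*stride, ... and keeps its own
// best (minimum distance and its index); the host later reduces the per-thread out_dist/out_idx arrays.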
// Values computed by each thread
unsigned short local_dist;
while(idx < total_entradas){
// Distance computation:
local_dist = lev_dist( local_str, first_word + (idx * HASH_SIZE * sizeof(char)) );
// Update the running minimum:
if(local_dist < local_min){
local_min = local_dist;
min_idx = idx;
}
// Next word:
idx += stride; // Word index
} // End of this thread's search loop
// Each thread writes its minimum result, to be copied back to the CPU...
out_dist[threadIdx.x + (blockDim.x * blockIdx.x)] = local_min;
out_idx[threadIdx.x + (blockDim.x * blockIdx.x)] = min_idx;
}
/*---------------------------------------------------------------------------
----------------------------------------------------------------------------
------------------------------ MAIN PROGRAM ------------------------------
----------------------------------------------------------------------------
---------------------------------------------------------------------------*/
void correct_usage();
int main(int argc, char **argv)
{
unsigned int threads_per_block = 256; // MAX THREADS PER BLOCK
unsigned int num_blocks = 4;
enum input_t {performance_mode, correct_mode} mode;
mode = performance_mode;
// Command line inputs
if( argc == 2 && strcmp(argv[1], "--help") == 0 )
{
correct_usage();
return 0;
cout << "Not ended!!!!!!!!" << endl;
}
if(argc >= 4 && strcmp(argv[1], "--grid") == 0)
{
threads_per_block = stoi(argv[2]);
num_blocks = stoi(argv[3]);
}
if(argc == 5)
{
if(strcmp(argv[4], "--correct") == 0){
mode = correct_mode;
} // else case already handled
}
unsigned short out_gpu_len = num_blocks * threads_per_block;
if(strcmp(setlocale(LC_ALL, NULL), "C") == 0)
{
cout << "Tenim encoding 'C'. Canvi a nes des sistema..." << endl;
if(setlocale(LC_ALL, "") == NULL)
{
cout << "Failed to set new locale\nEXIT";
return EXIT_FAILURE;
}
}
ifstream fitxer;
cout << "Obrir diccionari..." << endl;
fitxer.open("dictionary.txt");
string str;
unsigned int entradas = 0; // Maximum elements: 4,294,967,296 -> [0, 4,294,967,296)
if(!fitxer){
cout << endl << "Diccionari no obert" << endl << "EXIT" << endl;
return EXIT_FAILURE;
}
else { // Read the number of entries in the file:
unsigned short longest = 0;
cout << endl << "Diccionari obert" << endl << "Llegint..." << endl;
getline(fitxer, str); // Handles files with a single entry / lets us detect single-entry files.
while( !fitxer.eof() ){
entradas++;
getline(fitxer, str);
if(longest < str.length()){
longest = str.length();
}
}
cout << "Entrades diccionari: " << entradas << endl;
cout << "Palabra mas larga: " << longest << endl;
if(longest > HASH_SIZE){
cout << endl << "Macro HASH_SIZE insuficient. Minim recomanat: " << longest << endl;
cout << "Exit" << endl;
return EXIT_FAILURE;
}
}
cout << endl << "Close file... " << endl;
fitxer.close();
// Allocate space to hold the whole dictionary; that is why we needed the number of entries.
// Read the file and store the lines; the fill with PADDING_CHAR is handled by a kernel...
cout << "Allotjar memoria diccionari per " << entradas << " entrades de 25 char..." << endl;
char *first_word;
CUDA_CALL( hipMallocManaged(&first_word, entradas * HASH_SIZE * sizeof(char)) );
// Kernel call to initialize GPU memory -> PADDING_CHARs
//
cout << endl << "Kernel call. Threads per block: " << threads_per_block << endl;
cout << "k_setupPadding..." << endl;
hipLaunchKernelGGL(( k_setupPadding), dim3(num_blocks), dim3(threads_per_block), 0, 0, first_word, entradas);
CUDA_CALL( hipDeviceSynchronize() );
cout << "Padding ended" << endl;
// Use a separate working pointer so we do not lose the base address
char *word = first_word;
// Read the file and dump the characters into memory
//
cout << endl << "Reopen file" << endl;
fitxer.open("dictionary.txt");
if(!fitxer){
cout << endl << "Fitxer no obert" << endl << "EXIT" << endl;
return EXIT_FAILURE;
}
cout << "Fitxer a inici" << endl;
cout << endl << "Lectura de fitxer a memoria..." << endl;
unsigned short i;
while( !fitxer.eof() ){// Read whole lines, then convert them to our string format
getline(fitxer, str);
i = 0;
while(i < str.length()){ // Since HASH_SIZE is known, no '\0' delimiter is needed
word[i] = str[i];
i++;
}
// The end of the dictionary will be: word + (entradas * HASH_SIZE * sizeof(char)), exclusive.
// Jump to the next word:
word = word + (HASH_SIZE * sizeof(char));
}
cout << "Reading completed" << endl;
fitxer.close();
cout << "Dictionary closed" << endl;
// Stats:
cout << endl << "Numero de entradas existentes: " << entradas << endl;
//
// End of file reading; dictionary is now stored in memory.
//
//
// Common declarations:
cout << endl << "Reserva de memoria para los resultados: distancias calculadas e ndice para cada thread" << endl;
unsigned int *out_idx;
unsigned short *out_dist;
CUDA_CALL( hipMallocManaged(&out_idx, out_gpu_len * sizeof(unsigned int)) );
CUDA_CALL( hipMallocManaged(&out_dist, out_gpu_len * sizeof(unsigned short)) );
//In file
ifstream in_file;
cout << endl << "Documento para tomar inputs: ";
cin >> str;
in_file.open(str.c_str());
if(!in_file){
cout << endl << str << " no obert correctament. EXIT." << endl;
return EXIT_FAILURE;
}
// Out file
ofstream out_file;
if(mode == correct_mode)
out_file.open("corrected.txt", ofstream::out | ofstream::app);
else
out_file.open("report.txt", ofstream::out | ofstream::app);
if(!out_file)
{
cout << endl << "Sortida no oberta/creada correctament. EXIT." << endl;
return EXIT_FAILURE;
}
cout << "Resultado en fichero: corrected.txt" << endl;
// Variables auxiliars per correccio:
char *query_word;
CUDA_CALL( hipMallocManaged(&query_word, HASH_SIZE * sizeof(char)) );
unsigned short _min;
unsigned int launches = 0, corrected_w = 0, wrong_words = 0;
char insp;
//
//
// Consultes sobre memoria o correccio amb sa memoria
//
if(mode == correct_mode)
{
while ( !in_file.eof() && in_file.good() && out_file.good() )
{
i = 0;
while(i < HASH_SIZE)
{ // Get raw word
insp = (char) in_file.peek();
if( isalpha(insp) ) {
in_file.get(query_word[i]);
}
else {
query_word[i] = PADDING_CHAR;
}
i++;
}
// Consulta a memoria.
hipLaunchKernelGGL(( k_levenshtein), dim3(num_blocks), dim3(threads_per_block), 0, 0, query_word, first_word, entradas, out_idx, out_dist);
CUDA_CALL( hipDeviceSynchronize() );
launches++;
// Encontramos el resultado de menor valor
_min = 0;
for(unsigned short k = 0; (k < out_gpu_len) && (out_dist[_min] != 0); k++) {
if (out_dist[_min] > out_dist[k]) {
_min = k;
}
}
if(out_dist[_min] != 0)
corrected_w++;
// Con el mínimo tenemos el puntero a la palabra que se escribe en out_file
word = first_word + (out_idx[_min] * HASH_SIZE * sizeof(char));
for(unsigned char k = 0; (k < HASH_SIZE) && (word[k] != PADDING_CHAR); k++) { out_file << word[k]; }
// Escriure caracters entre paraules:
while( !isalpha((char)in_file.peek()) && !in_file.eof() )
{
in_file.get(insp);
out_file << insp;
if(insp == '*') { wrong_words++; }
}
if((launches % 10000) == 0)
cout << "Launches: " << launches << endl;
}
}
else
{
while( !in_file.eof() && in_file.good() )
{
// Input de paraula per calcular distancia:
str.clear();
in_file >> str;
out_file << "Q: " << str << " ";
// Relleno de lo que nos entra...
for(unsigned short letra = 0; letra < HASH_SIZE; letra++){
if(letra < str.length()){
query_word[letra] = str[letra];
} else {
query_word[letra] = (char)PADDING_CHAR;
}
}
hipLaunchKernelGGL(( k_levenshtein), dim3(num_blocks), dim3(threads_per_block), 0, 0, query_word, first_word, entradas, out_idx, out_dist);
CUDA_CALL( hipDeviceSynchronize() );
launches++;
// Bucle de resultats...
cout << endl << "Look for minimum dist result..." << endl;
_min = 0;
for(i = 0; i < out_gpu_len; i++){
if(out_dist[i] <= out_dist[_min]){
_min = i;
}
}
// Apuntamos a la palabra seleccionada y al fichero
word = first_word + ( out_idx[_min] * HASH_SIZE * sizeof(char) ); // Ponemos el puntero en la palabra que necesitamos...
for(unsigned char k = 0; (k < HASH_SIZE) && (word[k] != PADDING_CHAR); k++) { out_file << word[k]; }
out_file << " (" << out_dist[_min] << ")" << endl;
}
}
cout << "Processing ended" << endl;
// Tancam I/O files:
in_file.close(); out_file.close();
cout << "Archivos cerrados" << endl;
//
//
//
//
CUDA_CALL( hipFree(first_word) );
CUDA_CALL( hipFree(query_word) );
CUDA_CALL( hipFree(out_idx) );
CUDA_CALL( hipFree(out_dist) );
cout << "hipDeviceReset()..." << endl;
CUDA_CALL( hipDeviceReset() );
cout << "Device cleared" << endl;
if(mode == correct_mode)
{
cout << endl << "======================== RUN INFO ========================" << endl;
cout << " - Threads tot: " << out_gpu_len << endl;
cout << " - Blocks:" << num_blocks << '\t' << "Threads: " << threads_per_block << endl << endl;
cout << " - Words searched: " << launches << endl;
cout << " - Total corrections: " << corrected_w << endl;
cout << " - Real alterations: " << wrong_words << endl;
}
else
{
cout << endl << "======================== RUN INFO ========================" << endl;
cout << " - Threads tot: " << out_gpu_len << endl;
cout << " - Blocks:" << num_blocks << '\t' << "Threads: " << threads_per_block << endl << endl;
cout << " - Words consulted: " << launches << endl;
}
return 0;
}
// Definimos correct_usage:
void correct_usage()
{
cout << endl << "Author: Jover Mulet, Mateu. Contact @ [email protected]" << endl;
cout << "Electrical and Electronics Engineer from the U. of the Balearic Islands" << endl;
cout << endl << "Setting the computational grid. Correct use for Ubuntu terminal command line options:" << endl;
cout << " --grid [Threads per block (default: 256)] [Number of blocks(default: 4)] [--correct or --test(default)]" << endl;
cout << "Check your GPGPU's specs for a better usage. Desired multiples of 32 for Threads per block." << endl;
cout << "Displaying help, as it has been done. Just the parameter --help " << endl;
}
|
b4267aec9bb2cbb7f836899937c05c9b880a88b5.cu
|
#include <fstream>
#include <iostream>
#include <string>
#include <clocale>
#include <cctype>
#define HASH_SIZE 20
#define PADDING_CHAR '\0'
//Macro extaida de exemplos de NVIDIA: modificado printf -> cout
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
cout << "Error at " << __FILE__ << " line: " << __LINE__ << endl; \
return EXIT_FAILURE;}} while(0)
using namespace std;
// Definimos una funcion minimo por si CUDA/C++ no lo incluye para GPU
__device__ unsigned short my_strlen(char *s)
{
unsigned short len = 0;
while(len < HASH_SIZE && s[len] != PADDING_CHAR) { len++; }
return len;
}
__device__ unsigned short minimo(unsigned short a, unsigned short b){
if(a > b)
return b;
else
return a;
}
__device__ unsigned short lev_dist(char *s1, char *s2){
unsigned short l1, l2, i, j, c, res, w;
l1 = my_strlen(s1);
l2 = my_strlen(s2);
// Verifica que exista algo que comparar
if (l1 == 0) return(l2);
if (l2 == 0) return(l1);
w = l1 + 1;
// Reserva matriz con malloc: m[i,j] = m[j*w+i] !!
unsigned short m[((HASH_SIZE+1)*HASH_SIZE+1) + HASH_SIZE+1];
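// Worked example of this flattened layout (illustrative only), assuming s1 = "cat",
// s2 = "cut", so l1 = l2 = 3 and w = 4; cell m[i,j] is stored at m[j*w + i]:
//        ""  c  a  t
//    ""   0  1  2  3
//    c    1  0  1  2
//    u    2  1  1  2
//    t    3  2  2  1
// The edit distance is the bottom-right cell m[l2*w + l1] = 1.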
// Rellena primera fila y primera columna
for (i = 0; i <= l1; i++) m[i] = i;
for (j = 0; j <= l2; j++) m[j*w] = j;
// Recorremos resto de la matriz llenando pesos
for (i = 1; i <= l1; i++){
for (j = 1; j <= l2; j++)
{
if (s1[i-1] == s2[j-1])
c = 0;
else
c = 1;
m[j*w+i] = minimo(minimo(m[j*w+i-1]+1, m[(j-1)*w+i]+1), m[(j-1)*w+i-1]+c);
}
}
// Devolvemos esquina final de la matriz
res = m[l2*w+l1];
return(res);
}
/*----------------------------------------------------------------------------
----------------------------------------------------------------------------
----------------------- GLOBAL FUNCTIONS: KERNELS ------------------------
----------------------------------------------------------------------------
----------------------------------------------------------------------------*/
__global__ void k_setupPadding(char *first_word, unsigned int total_entradas, char _padd = (char)PADDING_CHAR){
unsigned int idx = threadIdx.x + (blockDim.x * blockIdx.x);
unsigned int stride = blockDim.x * gridDim.x;
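// Grid-stride loop: thread idx handles entries idx, idx+stride, idx+2*stride, ...
// so any launch geometry covers all total_entradas slots. With the defaults set in
// main() (4 blocks x 256 threads) the stride is 1024 entries per pass.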
while(idx < total_entradas){ // Aqui direccionamos de caracter en caracter
for(unsigned char _off = 0; _off < HASH_SIZE; _off++){
first_word[(idx * HASH_SIZE) + _off] = _padd;
}
idx += stride;
}
}
__global__ void k_levenshtein(char *str, char *first_word, unsigned int total_entradas, unsigned int *out_idx, unsigned short *out_dist)
{
char local_str[HASH_SIZE];
// Bottleneck
for(unsigned char i = 0; i < HASH_SIZE; i++) {
local_str[i] = str[i];
}
// Para copiar de vuelta en CPU
unsigned short local_min = 0xFFFF;
unsigned int min_idx = 0;
// Limites e indices
unsigned int idx = threadIdx.x + (blockDim.x * blockIdx.x); // Id para stride 0
unsigned int stride = blockDim.x * gridDim.x;
// Valores calculados de cada thread
unsigned short local_dist;
while(idx < total_entradas){
// Calculo de distancia:
local_dist = lev_dist( local_str, first_word + (idx * HASH_SIZE * sizeof(char)) );
// Actualizar valores:
if(local_dist < local_min){
local_min = local_dist;
min_idx = idx;
}
// Siguiente palabra:
idx += stride; // Numero de palabra
} // End while busqueda del thread
// Copia de resultados para cada thread de los minimos, back to CPU...
out_dist[threadIdx.x + (blockDim.x * blockIdx.x)] = local_min;
out_idx[threadIdx.x + (blockDim.x * blockIdx.x)] = min_idx;
}
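// Note: the kernel above only produces per-thread minima; every one of the
// num_blocks * threads_per_block threads stores its best (distance, index) pair at
// its global thread id in out_dist/out_idx. The overall winner is then chosen on
// the host by the linear scan over out_gpu_len entries in main(). A device-side
// reduction would be a possible optimisation, but is not implemented here.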
/*---------------------------------------------------------------------------
----------------------------------------------------------------------------
------------------------------ MAIN PROGRAM ------------------------------
----------------------------------------------------------------------------
---------------------------------------------------------------------------*/
void correct_usage();
int main(int argc, char **argv)
{
unsigned int threads_per_block = 256; // MAX THREADS PER BLOCKS
unsigned int num_blocks = 4;
enum input_t {performance_mode, correct_mode} mode;
mode = performance_mode;
// Command line inputs
if( argc == 2 && strcmp(argv[1], "--help") == 0 )
{
correct_usage();
return 0;
cout << "Not ended!!!!!!!!" << endl;
}
if(argc >= 4 && strcmp(argv[1], "--grid") == 0)
{
threads_per_block = stoi(argv[2]);
num_blocks = stoi(argv[3]);
}
if(argc == 5)
{
if(strcmp(argv[4], "--correct") == 0){
mode = correct_mode;
} // else ja considerat
}
unsigned short out_gpu_len = num_blocks * threads_per_block;
if(strcmp(setlocale(LC_ALL, NULL), "C") == 0)
{
cout << "Tenim encoding 'C'. Canvi a nes des sistema..." << endl;
if(setlocale(LC_ALL, "") == NULL)
{
cout << "Failed to set new locale\nEXIT";
return EXIT_FAILURE;
}
}
ifstream fitxer;
cout << "Obrir diccionari..." << endl;
fitxer.open("dictionary.txt");
string str;
unsigned int entradas = 0; // Maximo elementos: 4,294,967,296 -> [0, 4,294,967,296)
if(!fitxer){
cout << endl << "Diccionari no obert" << endl << "EXIT" << endl;
return EXIT_FAILURE;
}
else { // Lectura de la cantidad de entradas en el fichero:
unsigned short longest = 0;
cout << endl << "Diccionari obert" << endl << "Llegint..." << endl;
getline(fitxer, str); // En caso de solo tener una entrada o poder detectar los ficheros de solo una entrada.
while( !fitxer.eof() ){
entradas++;
getline(fitxer, str);
if(longest < str.length()){
longest = str.length();
}
}
cout << "Entrades diccionari: " << entradas << endl;
cout << "Palabra mas larga: " << longest << endl;
if(longest > HASH_SIZE){
cout << endl << "Macro HASH_SIZE insuficient. Minim recomanat: " << longest << endl;
cout << "Exit" << endl;
return EXIT_FAILURE;
}
}
cout << endl << "Close file... " << endl;
fitxer.close();
// Allotjam espai per guardar tot el diccionari. Per això tenim el nombre d'entrades
// Llegim fitxer i guardam les línies sense completar amb PADDING_CHAR -> Això ho passam a kernel...
cout << "Allotjar memoria diccionari per " << entradas << " entrades de 25 char..." << endl;
char *first_word;
CUDA_CALL( cudaMallocManaged(&first_word, entradas * HASH_SIZE * sizeof(char)) );
// Cridada kernel de inicialitzacio memoria GPU -> PADDING_CHARs
//
cout << endl << "Kernel call. Threads per block: " << threads_per_block << endl;
cout << "k_setupPadding..." << endl;
k_setupPadding<<<num_blocks, threads_per_block>>>(first_word, entradas);
CUDA_CALL( cudaDeviceSynchronize() );
cout << "Padding ended" << endl;
// Cambio de puntero on el que trabajar para no perder origen
char *word = first_word;
// Lectura de fitxer i bolcat de chars
//
cout << endl << "Reopen file" << endl;
fitxer.open("dictionary.txt");
if(!fitxer){
cout << endl << "Fitxer no obert" << endl << "EXIT" << endl;
return EXIT_FAILURE;
}
cout << "Fitxer a inici" << endl;
cout << endl << "Lectura de fitxer a memoria..." << endl;
unsigned short i;
while( !fitxer.eof() ){// Llegir línies senceres, despres convertir a nes nostro format de string
getline(fitxer, str);
i = 0;
while(i < str.length()){ // Conociendo HASH_SIZE, no hace falta delimitador '\0'
word[i] = str[i];
i++;
}
// El final del diccionari sera: word + (entradas * HASH_SIZE * sizeof(char)) no inclos.
// Salto de palabra:
word = word + (HASH_SIZE * sizeof(char));
}
cout << "Reading completed" << endl;
fitxer.close();
cout << "Dictionary closed" << endl;
// Stats:
cout << endl << "Numero de entradas existentes: " << entradas << endl;
//
// End file reading and stored in memory.
//
//
// Declaracions comuns:
cout << endl << "Reserva de memoria para los resultados: distancias calculadas e índice para cada thread" << endl;
unsigned int *out_idx;
unsigned short *out_dist;
CUDA_CALL( cudaMallocManaged(&out_idx, out_gpu_len * sizeof(unsigned int)) );
CUDA_CALL( cudaMallocManaged(&out_dist, out_gpu_len * sizeof(unsigned short)) );
//In file
ifstream in_file;
cout << endl << "Documento para tomar inputs: ";
cin >> str;
in_file.open(str.c_str());
if(!in_file){
cout << endl << str << " no obert correctament. EXIT." << endl;
return EXIT_FAILURE;
}
// Out file
ofstream out_file;
if(mode == correct_mode)
out_file.open("corrected.txt", ofstream::out | ofstream::app);
else
out_file.open("report.txt", ofstream::out | ofstream::app);
if(!out_file)
{
cout << endl << "Sortida no oberta/creada correctament. EXIT." << endl;
return EXIT_FAILURE;
}
cout << "Resultado en fichero: corrected.txt" << endl;
// Variables auxiliars per correccio:
char *query_word;
CUDA_CALL( cudaMallocManaged(&query_word, HASH_SIZE * sizeof(char)) );
unsigned short _min;
unsigned int launches = 0, corrected_w = 0, wrong_words = 0;
char insp;
//
//
// Consultes sobre memoria o correccio amb sa memoria
//
if(mode == correct_mode)
{
while ( !in_file.eof() && in_file.good() && out_file.good() )
{
i = 0;
while(i < HASH_SIZE)
{ // Get raw word
insp = (char) in_file.peek();
if( isalpha(insp) ) {
in_file.get(query_word[i]);
}
else {
query_word[i] = PADDING_CHAR;
}
i++;
}
// Consulta a memoria.
k_levenshtein<<<num_blocks, threads_per_block>>>(query_word, first_word, entradas, out_idx, out_dist);
CUDA_CALL( cudaDeviceSynchronize() );
launches++;
// Encontramos el resultado de menor valor
_min = 0;
for(unsigned short k = 0; (k < out_gpu_len) && (out_dist[_min] != 0); k++) {
if (out_dist[_min] > out_dist[k]) {
_min = k;
}
}
if(out_dist[_min] != 0)
corrected_w++;
// Con el mínimo tenemos el puntero a la palabra que se escribe en out_file
word = first_word + (out_idx[_min] * HASH_SIZE * sizeof(char));
for(unsigned char k = 0; (k < HASH_SIZE) && (word[k] != PADDING_CHAR); k++) { out_file << word[k]; }
// Escriure caracters entre paraules:
while( !isalpha((char)in_file.peek()) && !in_file.eof() )
{
in_file.get(insp);
out_file << insp;
if(insp == '*') { wrong_words++; }
}
if((launches % 10000) == 0)
cout << "Launches: " << launches << endl;
}
}
else
{
while( !in_file.eof() && in_file.good() )
{
// Input de paraula per calcular distancia:
str.clear();
in_file >> str;
out_file << "Q: " << str << " ";
// Relleno de lo que nos entra...
for(unsigned short letra = 0; letra < HASH_SIZE; letra++){
if(letra < str.length()){
query_word[letra] = str[letra];
} else {
query_word[letra] = (char)PADDING_CHAR;
}
}
k_levenshtein<<<num_blocks, threads_per_block>>>(query_word, first_word, entradas, out_idx, out_dist);
CUDA_CALL( cudaDeviceSynchronize() );
launches++;
// Bucle de resultats...
cout << endl << "Look for minimum dist result..." << endl;
_min = 0;
for(i = 0; i < out_gpu_len; i++){
if(out_dist[i] <= out_dist[_min]){
_min = i;
}
}
// Apuntamos a la palabra seleccionada y al fichero
word = first_word + ( out_idx[_min] * HASH_SIZE * sizeof(char) ); // Ponemos el puntero en la palabra que necesitamos...
for(unsigned char k = 0; (k < HASH_SIZE) && (word[k] != PADDING_CHAR); k++) { out_file << word[k]; }
out_file << " (" << out_dist[_min] << ")" << endl;
}
}
cout << "Processing ended" << endl;
// Tancam I/O files:
in_file.close(); out_file.close();
cout << "Archivos cerrados" << endl;
//
//
//
//
CUDA_CALL( cudaFree(first_word) );
CUDA_CALL( cudaFree(query_word) );
CUDA_CALL( cudaFree(out_idx) );
CUDA_CALL( cudaFree(out_dist) );
cout << "cudaDeviceReset()..." << endl;
CUDA_CALL( cudaDeviceReset() );
cout << "Device cleared" << endl;
if(mode == correct_mode)
{
cout << endl << "======================== RUN INFO ========================" << endl;
cout << " - Threads tot: " << out_gpu_len << endl;
cout << " - Blocks:" << num_blocks << '\t' << "Threads: " << threads_per_block << endl << endl;
cout << " - Words searched: " << launches << endl;
cout << " - Total corrections: " << corrected_w << endl;
cout << " - Real alterations: " << wrong_words << endl;
}
else
{
cout << endl << "======================== RUN INFO ========================" << endl;
cout << " - Threads tot: " << out_gpu_len << endl;
cout << " - Blocks:" << num_blocks << '\t' << "Threads: " << threads_per_block << endl << endl;
cout << " - Words consulted: " << launches << endl;
}
return 0;
}
// Definimos correct_usage:
void correct_usage()
{
cout << endl << "Author: Jover Mulet, Mateu. Contact @ [email protected]" << endl;
cout << "Electrical and Electronics Engineer from the U. of the Balearic Islands" << endl;
cout << endl << "Setting the computational grid. Correct use for Ubuntu terminal command line options:" << endl;
cout << " --grid [Threads per block (default: 256)] [Number of blocks(default: 4)] [--correct or --test(default)]" << endl;
cout << "Check your GPGPU's specs for a better usage. Desired multiples of 32 for Threads per block." << endl;
cout << "Displaying help, as it has been done. Just the parameter --help " << endl;
}
|
8d4bca5ee3423c9bfaa3acabdb844d1f924314c8.hip
|
// !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "gaussian_kernel.cu"
#include <jni.h>
#include "Gaussian.h"
#define OUTPUT
void runTest(int argc, char** argv);
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
int
main(int argc, char** argv)
{
//runTest(argc, argv);
}
//void runTest(int h_A, char** argv)
JNIEXPORT jint JNICALL Java_Gaussian_runTest
(JNIEnv *env, jobject j_obj, jintArray j_A, jint dim)
{
hipError_t err;
//display the test case
/*
for ( int m = 0 ; m < dim; m++){
for ( int n = 0 ; n < dim; n++){
printf("%d ", h_A[m * dim + n]);
}
printf("\n");
}
*/
unsigned int size_A = dim * dim;
unsigned int mem_size_A = sizeof(int) * size_A;
printf("Inside CUDA code\n");
jint *h_A = env->GetIntArrayElements(j_A, 0);
// allocate device memory for the matrix A
int* d_A;
hipMalloc((void**)&d_A,mem_size_A);
//MODIFY HERE
int* temp; //temporary array to store dim number of integer elements
//MODIFY HERE to allocate memory for temp array
//temp=(int*)malloc(dim*sizeof(int));
hipMalloc((void**)&temp,dim*sizeof(int));
// copy host memory to device
double timer1 = gettime();
//MODIFY HERE Copy the Matrix A to GPU memory
if((err=hipMemcpy((void*)d_A,(void*)h_A,mem_size_A,hipMemcpyHostToDevice))!=hipSuccess)
printf("Error: Host to Device copy%d\n",err);
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(dim / threads.x, dim / threads.y);
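// Note: this grid covers the matrix only when dim is an exact multiple of
// BLOCK_SIZE. A hedged alternative for arbitrary dim (it would also need an
// in-kernel bounds check) is the usual ceiling division:
// dim3 grid((dim + threads.x - 1) / threads.x, (dim + threads.y - 1) / threads.y);
// The code below keeps the original divisible-dim assumption.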
// execute the kernel
for ( int i = 0 ; i < dim ; i++){
hipLaunchKernelGGL(( Gaussian_CUDA), dim3(grid), dim3(threads) , 0, 0, d_A, dim, i, temp);
}
// copy result from device to host
//MODIFY HERE
if((err=hipMemcpy((void*)h_A,(void*)d_A,mem_size_A, hipMemcpyDeviceToHost))!=hipSuccess)
printf("Error:Device to Device copy%d\n",err);
double timer2 = gettime();
printf("GPU time = %lf\n",(timer2-timer1)*1000);
#ifdef OUTPUT
//the result should be I(dim*dim)
for ( int m = 0 ; m < dim; m++){
for ( int n = 0 ; n < dim; n++){
printf("%d ", h_A[m * dim + n]);
}
printf("\n");
}
#endif
env->ReleaseIntArrayElements(j_A, h_A, 0);
//free(h_A);
hipFree(d_A);
hipFree(temp);
return 0;
}
|
8d4bca5ee3423c9bfaa3acabdb844d1f924314c8.cu
|
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#include "gaussian_kernel.cu"
#include <jni.h>
#include "Gaussian.h"
#define OUTPUT
void runTest(int argc, char** argv);
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
int
main(int argc, char** argv)
{
//runTest(argc, argv);
}
//void runTest(int h_A, char** argv)
JNIEXPORT jint JNICALL Java_Gaussian_runTest
(JNIEnv *env, jobject j_obj, jintArray j_A, jint dim)
{
cudaError_t err;
//display the test case
/*
for ( int m = 0 ; m < dim; m++){
for ( int n = 0 ; n < dim; n++){
printf("%d ", h_A[m * dim + n]);
}
printf("\n");
}
*/
unsigned int size_A = dim * dim;
unsigned int mem_size_A = sizeof(int) * size_A;
printf("Inside CUDA code\n");
jint *h_A = env->GetIntArrayElements(j_A, 0);
// allocate device memory for the matrix A
int* d_A;
cudaMalloc((void**)&d_A,mem_size_A);
//MODIFY HERE
int* temp; //temporary array to store dim number of integer elements
//MODIFY HERE to allocate memory for temp array
//temp=(int*)malloc(dim*sizeof(int));
cudaMalloc((void**)&temp,dim*sizeof(int));
// copy host memory to device
double timer1 = gettime();
//MODIFY HERE Copy the Matrix A to GPU memory
if((err=cudaMemcpy((void*)d_A,(void*)h_A,mem_size_A,cudaMemcpyHostToDevice))!=cudaSuccess)
printf("Error: Host to Device copy%d\n",err);
// setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(dim / threads.x, dim / threads.y);
// execute the kernel
for ( int i = 0 ; i < dim ; i++){
Gaussian_CUDA<<< grid, threads >>>(d_A, dim, i, temp);
}
// copy result from device to host
//MODIFY HERE
if((err=cudaMemcpy((void*)h_A,(void*)d_A,mem_size_A, cudaMemcpyDeviceToHost))!=cudaSuccess)
printf("Error:Device to Device copy%d\n",err);
double timer2 = gettime();
printf("GPU time = %lf\n",(timer2-timer1)*1000);
#ifdef OUTPUT
//the result should be I(dim*dim)
for ( int m = 0 ; m < dim; m++){
for ( int n = 0 ; n < dim; n++){
printf("%d ", h_A[m * dim + n]);
}
printf("\n");
}
#endif
env->ReleaseIntArrayElements(j_A, h_A, 0);
//free(h_A);
cudaFree(d_A);
cudaFree(temp);
return 0;
}
|
63deab1bef0344bc7324022ddca03fb1cd8b1ad1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define SIZEARRAY 200
__global__
void addVec(int* vecA, int* vecB, int* vecC, int size)
{
int i = threadIdx.x;
if (i < size) // guard so launches with more threads than elements stay in bounds
vecC[i] = vecA[i] + vecB[i];
}
void printArray(int* array)
{
for (int i = 0; i<SIZEARRAY; i++)
{
printf("%d ", array[i]);
}
}
int main()
{
//allocating memory for host
int *A = new int[SIZEARRAY];
int *B = new int[SIZEARRAY];
int *C = new int[SIZEARRAY];
//allocating memory for device
int *dA, *dB, *dC;
hipMalloc((void**)&dA, SIZEARRAY * sizeof(int));
hipMalloc((void**)&dB, SIZEARRAY * sizeof(int));
hipMalloc((void**)&dC, SIZEARRAY * sizeof(int));
//initialize A and B in host
for (int i = 0; i<SIZEARRAY; i++)
{
A[i] = i;
B[i] = SIZEARRAY - i;
}
//copy from the host to device
hipMemcpy(dA, A, SIZEARRAY * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dB, B, SIZEARRAY * sizeof(int), hipMemcpyHostToDevice);
//call the kernel
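// Note: one block of SIZEARRAY threads is enough here because SIZEARRAY (200) is
// below the usual per-block limit of 1024 threads. A sketch of a multi-block
// launch for larger arrays, relying on the bounds check inside addVec, would be:
// int threads = 256;
// int blocks = (SIZEARRAY + threads - 1) / threads;
// addVec<<<blocks, threads>>>(dA, dB, dC, SIZEARRAY);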
addVec<<<1, SIZEARRAY>>>(dA, dB, dC, SIZEARRAY);
//copy data from device to host
hipMemcpy(C, dC, SIZEARRAY * sizeof(int), hipMemcpyDeviceToHost);
//.. at this point the data will be available to the host machine
hipFree(dA);
hipFree(dB);
hipFree(dC);
printArray(C);
delete[] A;
delete[] B;
delete[] C;
}
|
63deab1bef0344bc7324022ddca03fb1cd8b1ad1.cu
|
#include <cuda.h>
#include <stdio.h>
#define SIZEARRAY 200
__global__
void addVec(int* vecA, int* vecB, int* vecC, int size)
{
int i = threadIdx.x;
if (i < size) // guard so launches with more threads than elements stay in bounds
vecC[i] = vecA[i] + vecB[i];
}
void printArray(int* array)
{
for (int i = 0; i<SIZEARRAY; i++)
{
printf("%d ", array[i]);
}
}
int main()
{
//allocating memory for host
int *A = new int[SIZEARRAY];
int *B = new int[SIZEARRAY];
int *C = new int[SIZEARRAY];
//allocating memory for device
int *dA, *dB, *dC;
cudaMalloc((void**)&dA, SIZEARRAY * sizeof(int));
cudaMalloc((void**)&dB, SIZEARRAY * sizeof(int));
cudaMalloc((void**)&dC, SIZEARRAY * sizeof(int));
//initialize A and B in host
for (int i = 0; i<SIZEARRAY; i++)
{
A[i] = i;
B[i] = SIZEARRAY - i;
}
//copy from the host to device
cudaMemcpy(dA, A, SIZEARRAY * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, SIZEARRAY * sizeof(int), cudaMemcpyHostToDevice);
//call the kernel
addVec<<<1, SIZEARRAY>>>(dA, dB, dC, SIZEARRAY);
//copy data from device to host
cudaMemcpy(C, dC, SIZEARRAY * sizeof(int), cudaMemcpyDeviceToHost);
//.. at this point the data will be available to the host machine
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
printArray(C);
delete[] A;
delete[] B;
delete[] C;
}
|
a12d247b15c57a5ede944f7d8d6e7fcd583b4438.hip
|
// !!! This is a file automatically generated by hipify!!!
// #include <hip/hip_runtime.h>
#include <sys/time.h>
#include <stdio.h>
#include <string.h>
#define THREADS 10
#define ROOM_SIZE 10
#define BLOCKS (ROOM_SIZE * ROOM_SIZE + THREADS - 1) / THREADS
#define ITERATION_LIMIT 100
__global__ void simulate_room(float *H) {
int index = threadIdx.x + blockIdx.x * THREADS;
int j = index % ROOM_SIZE;
int i = index / ROOM_SIZE;
float tmp = H[index];
for(int it = 0 ; it < ITERATION_LIMIT ; it++) {
if(i > 0 && i < ROOM_SIZE - 1 && j > 0 && j < ROOM_SIZE - 1)
tmp = 0.25 * (H[(i - 1) * ROOM_SIZE + j] + H[(i + 1) * ROOM_SIZE + j] + H[i * ROOM_SIZE + j + 1] + H[i * ROOM_SIZE + j - 1]);
__syncthreads();
H[index] = tmp;
__syncthreads();
}
}
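// Note: __syncthreads() only synchronizes threads of one block, and this launch
// uses several blocks (BLOCKS = 10 for ROOM_SIZE = 10, THREADS = 10), so cells on
// block borders may read neighbours from an older or newer iteration. A race-free
// sketch (simulate_step is a hypothetical single-iteration kernel, not defined
// here) would ping-pong two buffers with one launch per iteration:
// for (int it = 0; it < ITERATION_LIMIT; it++) {
// simulate_step<<<BLOCKS, THREADS>>>(d_in, d_out);
// float *t = d_in; d_in = d_out; d_out = t;
// }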
int main(int argc, char* argv[]) {
float *h_H, *d_H;
h_H = (float *)malloc(sizeof(float) * ROOM_SIZE * ROOM_SIZE);
for(int i = 0 ; i < ROOM_SIZE ; i++) {
for(int j = 0 ; j < ROOM_SIZE ; j++)
h_H[i * ROOM_SIZE + j] = 0;
}
for(int i = 0 ; i < ROOM_SIZE ; i++) {
h_H[i * ROOM_SIZE + 0] = 20;
h_H[i * ROOM_SIZE + ROOM_SIZE - 1] = 20;
h_H[0 * ROOM_SIZE + i] = 20;
h_H[ROOM_SIZE * (ROOM_SIZE - 1) + i] = 20;
}
for(int i = 3 * (ROOM_SIZE) / 10 ; i < 7 * ROOM_SIZE / 10 ; i++)
h_H[i] = 100;
hipMalloc((void **) &d_H, sizeof(float) * ROOM_SIZE * ROOM_SIZE);
// for(int i = 0 ; i < ROOM_SIZE; i++) {
// for(int j = 0 ; j < ROOM_SIZE ; j++)
// printf("%.0f ", h_H[i][j]);
// printf("\n");
// }
hipMemcpy(d_H, h_H, sizeof(float) * ROOM_SIZE * ROOM_SIZE, hipMemcpyHostToDevice);
printf("THREADS %d BLOCKS %d\n", THREADS, BLOCKS);
struct timeval t1, t2;
gettimeofday(&t1, 0);
hipLaunchKernelGGL(( simulate_room), dim3(BLOCKS),dim3(THREADS), 0, 0, d_H);
hipDeviceSynchronize();
gettimeofday(&t2, 0);
double time1 = (t2.tv_sec - t1.tv_sec) * 1e6 + (t2.tv_usec - t1.tv_usec);
printf("Time for GPU: %.8f us \n", time1);
hipMemcpy(h_H, d_H, sizeof(float) * ROOM_SIZE * ROOM_SIZE, hipMemcpyDeviceToHost);
for(int i = 0 ; i < ROOM_SIZE; i++) {
for(int j = 0 ; j < ROOM_SIZE ; j++)
printf("%.0f ", h_H[i * ROOM_SIZE + j]);
printf("\n");
}
// for(int i = 0 ; i < ROOM_SIZE ; i++)
}
|
a12d247b15c57a5ede944f7d8d6e7fcd583b4438.cu
|
// #include <cuda_runtime.h>
#include <sys/time.h>
#include <stdio.h>
#include <string.h>
#define THREADS 10
#define ROOM_SIZE 10
#define BLOCKS (ROOM_SIZE * ROOM_SIZE + THREADS - 1) / THREADS
#define ITERATION_LIMIT 100
__global__ void simulate_room(float *H) {
int index = threadIdx.x + blockIdx.x * THREADS;
int j = index % ROOM_SIZE;
int i = index / ROOM_SIZE;
float tmp = H[index];
for(int it = 0 ; it < ITERATION_LIMIT ; it++) {
if(i > 0 && i < ROOM_SIZE - 1 && j > 0 && j < ROOM_SIZE - 1)
tmp = 0.25 * (H[(i - 1) * ROOM_SIZE + j] + H[(i + 1) * ROOM_SIZE + j] + H[i * ROOM_SIZE + j + 1] + H[i * ROOM_SIZE + j - 1]);
__syncthreads();
H[index] = tmp;
__syncthreads();
}
}
int main(int argc, char* argv[]) {
float *h_H, *d_H;
h_H = (float *)malloc(sizeof(float) * ROOM_SIZE * ROOM_SIZE);
for(int i = 0 ; i < ROOM_SIZE ; i++) {
for(int j = 0 ; j < ROOM_SIZE ; j++)
h_H[i * ROOM_SIZE + j] = 0;
}
for(int i = 0 ; i < ROOM_SIZE ; i++) {
h_H[i * ROOM_SIZE + 0] = 20;
h_H[i * ROOM_SIZE + ROOM_SIZE - 1] = 20;
h_H[0 * ROOM_SIZE + i] = 20;
h_H[ROOM_SIZE * (ROOM_SIZE - 1) + i] = 20;
}
for(int i = 3 * (ROOM_SIZE) / 10 ; i < 7 * ROOM_SIZE / 10 ; i++)
h_H[i] = 100;
cudaMalloc((void **) &d_H, sizeof(float) * ROOM_SIZE * ROOM_SIZE);
// for(int i = 0 ; i < ROOM_SIZE; i++) {
// for(int j = 0 ; j < ROOM_SIZE ; j++)
// printf("%.0f ", h_H[i][j]);
// printf("\n");
// }
cudaMemcpy(d_H, h_H, sizeof(float) * ROOM_SIZE * ROOM_SIZE, cudaMemcpyHostToDevice);
printf("THREADS %d BLOCKS %d\n", THREADS, BLOCKS);
struct timeval t1, t2;
gettimeofday(&t1, 0);
simulate_room<<<BLOCKS,THREADS>>>(d_H);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
double time1 = (t2.tv_sec - t1.tv_sec) * 1e6 + (t2.tv_usec - t1.tv_usec);
printf("Time for GPU: %.8f us \n", time1);
cudaMemcpy(h_H, d_H, sizeof(float) * ROOM_SIZE * ROOM_SIZE, cudaMemcpyDeviceToHost);
for(int i = 0 ; i < ROOM_SIZE; i++) {
for(int j = 0 ; j < ROOM_SIZE ; j++)
printf("%.0f ", h_H[i * ROOM_SIZE + j]);
printf("\n");
}
// for(int i = 0 ; i < ROOM_SIZE ; i++)
}
|
25214d0870d3f9f5cd1acb691514df7b0caf8a14.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "im2col.h"
#include "hip/hip_runtime.h"
}
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
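// Worked example of the output geometry, assuming height = width = 32, ksize = 3,
// pad = 1, stride = 1: height_col = (32 + 2*1 - 3)/1 + 1 = 32 and width_col = 32,
// so num_kernels = channels * 32 * 32 threads are launched and each thread copies
// one ksize*ksize patch of a single input channel into its column of data_col.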
hipLaunchKernelGGL(( im2col_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK),
dim3(BLOCK), 0, 0,
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
|
25214d0870d3f9f5cd1acb691514df7b0caf8a14.cu
|
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "im2col.h"
#include "cuda.h"
}
__global__ void im2col_gpu_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_col) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//*data_col_ptr = data_im_ptr[ii * width + jj];
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(float *im,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_col){
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
BLOCK>>>(
num_kernels, im, height, width, ksize, pad,
stride, height_col,
width_col, data_col);
}
|
dfa2efa677b366214fe788d2a13832dd67bf884f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<sys/mman.h>
#include<assert.h>
#include<iostream>
#include<string.h>
#include "../common.h"
#include "types.h"
//#include<unordered_map>
//#include<cuda.h>
__device__ int CTALB = 0; //the lower bound of CTA id you want to profile
__device__ int CTAUB = 99999; //the upper bound of CTA id you want to profile
__device__ int CONSTANCE = 128;
__device__ int aliveCTA = 0;
//__device__ std::unordered_map< std::string, long> blockmap;
//__device__ std::vector<int> testt; //this DOESN'T work //"dynamic initialization doesn't work for __device__
__device__ bool VERBOSE=false;
__device__ bool CALLPATHVERBOSE=false;
extern "C"
{ //so that no mangling for function names
__device__ void takeString(void* , int);
__device__ void RetKernel(void*);
__device__ void passBasicBlock(int, int, int, int, void*);
__device__ void print5(void*, int, int, int, int, void*);
__device__ void print4(void*);
__device__ void callFunc(void* , void* , int , int, void*);
__device__ int getContextID(void*);
__device__ void* InitKernel(void*);
__device__ void print1(int);
}
__device__ unsigned long long ccnntt = 1; //the very first element is reserved for metadata
__device__ unsigned long long bbccnntt = 1; //the very first element is reserved for metadata
__device__ int* buffer_oN_DeViCe; //should be multiples of 6
//__device__ int* globalCallStack;
//__device__ CallSite_t* globalCallStack;
//__device__ int* stackHeight;
__device__ char funcDic[UNIQUE_FUNC_DEVICE][FUNC_NAME_LEN]; //maintains 100 unique functions and 31 chars for each
__device__ int dicHeight = 0; // size of funcDic[][]
__device__ CallSite_t contextDic[TOTAL_NUMBER_CONTEXT][CALL_PATH_LEN_DEVICE]; //maintains TOTAL_NUMBER_CONTEXT unique contexts, each with up to CALL_PATH_LEN_DEVICE call sites
__device__ int cHeight = 0;
#ifdef PER_THREAD
__device__ bool per_thread_trace_d = true;
#else
__device__ bool per_thread_trace_d = false;
#endif
/*
#define MAX_NUM_CTAS 1024
__device__ volatile int arrIn[MAX_NUM_CTAS];//for inter-CTA sync
__device__ volatile int arrOut[MAX_NUM_CTAS];//for inter-CTA sync
__device__ void __sync_ctas(int goalVal) //, volatile int *arrIn, volatile int *arrOut)
{
// assuming there the number of threads/CTA is greater than the number of CTAs in the entire grid
// otherwise, dead loop
// this assumption hurts
int nBlockNum = gridDim.x * gridDim.y;
int bid = blockIdx.x* gridDim.y + blockIdx.y;
int tid = threadIdx.x * blockDim.y + threadIdx.y;
if (threadIdx.x + threadIdx.y ==0)
arrIn[bid] = goalVal;
if (bid==1)
{
if ( tid < nBlockNum)
{
while( arrIn[tid] != goalVal)
{}
}
__syncthreads();
if (tid < nBlockNum)
arrOut[tid] = goalVal;
}
if (tid==0)
while (arrOut[bid]!=goalVal)
{}
__syncthreads();
if ( tid==0 )
printf("d: CTA %d sync-ed\n", bid);
}
*/
/*
//this is from a published paper.
//but it doesn't work.
// probably because only one CTA is allowed on one SM, otherwise errors.
__device__ void __sync_ctas(int goalVal)
{
if (threadIdx.x + threadIdx.y ==0)
{
int id = atomicAdd( (int*)&g_mutex,1);
printf("d: CTA (%d, %d) got id=%d, goal is %d\n", blockIdx.x, blockIdx.y, id, goalVal);
return;
while (g_mutex != goalVal)
{}//busy wait
}
__syncthreads();
}
*/
__device__ void mystrcpy(char* dst, char* src)
{
int cnt = 0;
while ( src[cnt] != '\0' && cnt < FUNC_NAME_LEN-1) //never exceeds this 30 limit
{
dst[cnt] = src[cnt];
cnt++;
}
dst[cnt] = '\0';
return;
}
__device__ bool mystrcmp(char* dst, char* src)
{
int cnt = 0;
while ( cnt < FUNC_NAME_LEN-1 ) //never exceeds this 30 limit
{
if ( dst[cnt] == '\0' && src[cnt] == '\0')
return true;
if (dst[cnt] != src[cnt])
return false;
cnt++;
}
return true;
}
__device__ int getFuncID(char* func)
{
if (dicHeight == 0 ) //the very first function
{
mystrcpy(funcDic[0], func);
// printf("src: %s\n", func);
// printf("dst: %s\n", funcDic[0]);
dicHeight ++;
return 0;
}
// printf("d: height = %d\n", dicHeight);
for(int i=0; i < dicHeight; i++)
{
bool found = mystrcmp( funcDic[i], func );
// printf("d:: compare this pair: %s: \t%s \tVS\t %s\n", found?"yes":"no", funcDic[i], func);
if(found)
return i;
}
//return -1;//DEBUG
//if you are here, means we have a new func
mystrcpy(funcDic[dicHeight], func);
dicHeight ++;
return dicHeight-1;
}
__device__ void updateCallStack(int caller, int callee, short sline, short scolm, int bid, int tid, void* p_stackzone)
{
int offset = bid*blockDim.x*blockDim.y+tid;
// CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE]));
// int &height = stackHeight[offset];
CallSite_t* callStack = (CallSite_t*) p_stackzone;
int bytesPerThread = (CALL_PATH_LEN_DEVICE*sizeof(CallSite_t));
int* temp = (int*)( (char*)p_stackzone + bytesPerThread+16); //offset by 16 to be safe, need to be consistent
int &height = *temp;
// int &h11 = * (int*)( (char*)p_stackzone + bytesPerThread);
// if(CALLPATHVERBOSE)
// printf( ":::::::: height = %d :::::::::\n", height);
// assert(height != 1 && "stack height != 1") ;
//return;//DUBUG
if (height==0)
{
// if (CALLPATHVERBOSE)
// printf("first ever. tid=%d\n", tid);
callStack[0].id = caller;
callStack[0].sline = sline;
callStack[0].scolm = scolm;
callStack[1].id = callee;
callStack[1].sline = -1;
callStack[1].scolm = -1;
height=2;
return;
}
int p_caller = callStack[height-2].id;
int p_callee = callStack[height-1].id;
if ( p_caller == caller && p_callee == callee)
{ //repeated call
// if (CALLPATHVERBOSE)
// printf("repeated call\n");
callStack[height-2].sline = sline;
callStack[height-2].scolm = scolm;
return;
}
else if ( p_caller == caller && p_callee != callee)
{ //the same parent called a different function, simply update the callee
// if (CALLPATHVERBOSE)
// printf("same caller different callee\n");
callStack[height-1].id = callee;
callStack[height-2].sline = sline;
callStack[height-2].scolm = scolm;
return;
}
else if ( p_callee == caller)
{ // a typical call path
// if (CALLPATHVERBOSE)
// printf("call sequence\n");
callStack[height-1].sline = sline;
callStack[height-1].scolm = scolm;
callStack[height].id = callee;
callStack[height].sline = -1;
callStack[height].scolm = -1;
height++;
return;
}
// return;//DUBUG
// if (CALLPATHVERBOSE)
// printf("the caller exists deeply in the stack\n");
// the caller exists deeply in the stack
for (int i=height-1; i>=0; i--)
{
if ( callStack[i].id == caller)
{
height = i+1;
callStack[i].id = callee;
callStack[i].sline = -1;
callStack[i].scolm = -1;
callStack[i].sline = sline;
callStack[i].scolm = scolm;
return;
}
}
// the caller exists deeply in the stack
// assert( (0==-1) && "!! undefined things happeened here\n");
}
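// Illustrative trace of the cases above, assuming function ids A, B, C and an
// initially empty per-thread stack:
// callFunc(A,B): "first ever" -> stack [A,B], height 2
// callFunc(A,C): same caller, different callee -> stack [A,C]
// callFunc(C,B): typical call sequence -> stack [A,C,B], height 3
// callFunc(C,B) again: repeated call -> stack unchanged, call site refreshed
// callFunc(A,B): caller found deeper in the stack -> the stack is unwound to that
// frame and the entry is overwritten with the callee.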
/*
__device__ void printCallStack(int bid, int tid)
{
int offset = bid*blockDim.x*blockDim.y+tid;
CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE]));
int height = stackHeight[offset];
printf(" d::: current call stack height: %d @ bid = %d, tid = %d = (%d,%d,%d,%d)\n", height, bid, tid, threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
if (height<1)
return;
for (int i=0; i<height; i++)
printf(" %d: call site: %d, (%d, %d)\n", i, callStack[i].id, callStack[i].sline, callStack[i].scolm );
}
*/
__device__ void* InitKernel(void* ptrhead)
{
//TODO:
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return NULL;
int tid = threadIdx.x + threadIdx.y *blockDim.x;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int global_tid = tid + bid*blockDim.x*blockDim.y;
int num_cta = gridDim.x*gridDim.y;
int num_thread = blockDim.x*blockDim.y;
__shared__ char* handler; //this pointer is for maintaing stack/callpath
__syncthreads();
int bytesPerThread = sizeof(CallSite_t)*CALL_PATH_LEN_DEVICE + 32;// I put 32 just to be safe
if ( tid ==0 )
{
handler = (char*) malloc( blockDim.x*blockDim.y*bytesPerThread);
assert( handler!=NULL);
// printf(" CTA \t%d\tgrabs memroy\t%p\n", bid, handler);
int rank = atomicAdd( &aliveCTA, 1);
printf(" CTA\t%d\tonline, total alive\t%d\n", bid, rank);
if (rank==0)
{
// if (tid%32==0)
{
// buffer_oN_DeViCe = (int*)ptrhead;
printf("\nd: InitKernel...\n");
printf("d: buffer pointer: %p\n", buffer_oN_DeViCe);
printf("d: size of kernel grid: %d, %d\t%d, %d\n", gridDim.x, gridDim.y, blockDim.x, blockDim.y);
}
}
//if (rank == 1)
}
buffer_oN_DeViCe = (int*)ptrhead;
__syncthreads();
void* stackzone = (void*)( handler + bytesPerThread*tid );
// if (tid==1)
// {
// stackHeight = (int*) ptr3;
// globalCallStack = (CallSite_t*)ptr2;
// buffer_oN_DeViCe = (int*)ptrhead;
//}
// if (tid ==0)
// printf("d: DEBUG: here1 from CTA %d\n", bid);
return stackzone;
// if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0)
// vunlunteer to do the initialization
/*
done2 = atomicAdd(&done2, 1);
if ( stackHeight==NULL && done2 == 2)
{
printf("I will 2nd malloc() %ld by (%d, %d)\n", numthreads*sizeof(int) , bid, tid);
stackHeight = (int*)malloc( sizeof(int) * numthreads);
printf(" 2nd malloc() done by (%d, %d)\n", bid, tid);
}
else{ holdon( bid*10000); }
__syncthreads();
holdon(bid*10000);
// done1 = atomicAdd(&done1, 1);
// if ( globalCallStack==NULL && tid==0)
// {
// printf("I will malloc() %ld by (%d, %d)\n", numthreads*sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE ,bid, tid);
// globalCallStack = (CallSite_t**) malloc(sizeof(CallSite_t*) * numthreads);
//
// }
if ( globalCallStack[global_tid] ==NULL)
{ //DEBUG: there are still repeated allocation from the SAME thread
globalCallStack[global_tid] = (CallSite_t*) malloc(UNIQUE_FUNC_DEVICE* sizeof(CallSite_t) );
printf("I do it by myself %ld @ %p by (%d, %d)=%d\n", sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE, globalCallStack[global_tid], bid, tid, global_tid);
}
__syncthreads();
stackHeight[global_tid] = 0;
__syncthreads();
printf("__ back from InitKernel: %d, %d\n", bid, tid);
*/ //Du: July 10
/* else
{ //wait and see
int cnt = 0;
while ( globalCallStack==NULL )
cnt++;
while ( stackHeight==NULL)
cnt++;
}
*/
/*
int mask = __ballot(1);
int leader = __ffs(mask)-1;
if( globalCallStack==NULL && leader == threadIdx.x%32)
{
long numthreads = gridDim.x*gridDim.y*blockDim.x*blockDim.y*32;
printf("I will malloc() %ld by (%d, %d)\n", numthreads*sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE , threadIdx.x, threadIdx.y);
globalCallStack = (CallSite_t**) malloc(sizeof(CallSite_t*) * numthreads);
for( int i = 0; i<numthreads; i++)
globalCallStack[i] = (CallSite_t*) malloc(UNIQUE_FUNC_DEVICE* sizeof(CallSite_t) );
stackHeight = (int*)malloc( sizeof(int) * numthreads);
for (int i=0; i<numthreads; i++)
stackHeight[i] = 0;
}
*/
}
__device__ void callFunc(void* er, void* ee, int sline, int scolm, void* p_stackzone)
{
// if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return; //DEBUG
// printf("d::%d\n", sline );
// printf("d::%s\n", (char*)er );
// if (CALLPATHVERBOSE)
// printf("d:::: >>>>\n");
int id1 = getFuncID( (char*)er );
int id2 = getFuncID( (char*)ee );
// if (CALLPATHVERBOSE)
// {
// printf("d:::: ID: %d :%s\n", id1, (char*)er );
// printf("d:::: ID: %d :%s\n", id2, (char*)ee );
// }
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int global_tid = bid * (blockDim.x * blockDim.y) + tid;
updateCallStack(id1, id2, (short) sline, (short) scolm, bid, tid, p_stackzone);
// printCallStack(global_tid);
// if (CALLPATHVERBOSE)
// printf("d:::: <<<<\n");
}
/*
__device__ void takeString(void* p, int action)
{
if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return;
if (VERBOSE)
{
if (action==1)
printf("d: caller: %s\n",(char*)p);
else if (action==2)
printf("d: callee: %s\n",(char*)p);
else if (action==3)
printf("d: return: %s\n",(char*)p);
else
printf("d: undefined: %s\n",(char*)p);
}
return;
}
*/
__device__ void cxtprint(int id)
{
if (id<0)
return;
printf("d::: requested context id: %d out of %d\n", id, cHeight);
for (int i = 0; i< CALL_PATH_LEN_DEVICE && contextDic[id][i].id != -1 ; i++)
{
printf("d::::::: current context [%d][%d]: %d, %d, %d\n", id, i, contextDic[id][i].id, contextDic[id][i].sline, contextDic[id][i].scolm) ;
}
return;
}
__device__ void cxtcpy( CallSite_t* dst, CallSite_t* src , int height) //context copy
{
int i;
for( i=0; i< height; i++)
dst[i] = src[i];
// assert(i<CALL_PATH_LEN_DEVICE && "code: e56: call stack too deep");
dst[i].id = -1; //to mark the ending of one context
return;
}
__device__ bool cxtcmp( CallSite_t* dst, CallSite_t* src, int height)
{
for( int i=0; i< height; i++)
if ( dst[i].id == src[i].id ) // && dst[i].id == src[i].id &&
continue;
else
return false;
return true;
}
__device__ int getContextID(void* p_stackzone)
{ //shared by all treahds, there are races
//you can manually to take care of serialization?
// if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y != 0 ) return -2; //DEBUG
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int tid = threadIdx.y * blockDim.x + threadIdx.x;
// int offset = bid*blockDim.x*blockDim.y+tid;
// CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE]));
// int &height = stackHeight[offset];
CallSite_t* callStack = (CallSite_t*) p_stackzone;
int bytesPerThread = (CALL_PATH_LEN_DEVICE*sizeof(CallSite_t));
int* temp = (int*)( (char*)p_stackzone + bytesPerThread+16); //offset by 16 to be safe, consistent with updateCallStack
int &height = *temp;
if ( height ==0) //it is possible that call stack is still empty
return -1;
if (cHeight==0)// the first ever context in the dic
{
// if (CALLPATHVERBOSE)
// printf("d::: the very first context in dic, depth=%d\n", height);
cxtcpy(contextDic[0], callStack, height );
cHeight=1;
return 0;
}
// something already exists
// if (CALLPATHVERBOSE)
// {
// printf("d::: going to match existing items in context dic\n");
// printf("d::: number of existing contexts: %d\n", cHeight);
// }
int i;
for (i = 0; i<cHeight; i++)
{
if ( cxtcmp( contextDic[i], callStack, height ) ) //yes, found
{
// if (CALLPATHVERBOSE)
// printf("d::: matched, returning %d, depth=%d\n",i, height);
return i;
}
}
// if (CALLPATHVERBOSE)
// printf("d::: not found, value of i: %d\n", i);
// assert (i< TOTAL_NUMBER_CONTEXT && "code:e34: Not enough space for Context Dic, index i");
// printCallStack();
cxtcpy(contextDic[i], callStack, height );
cHeight = i+1;
// assert (cHeight < TOTAL_NUMBER_CONTEXT && "code:e41: Not enough space for Context Dic, cHeight");
// if (CALLPATHVERBOSE)
// printf("d::: inserted new one: id = %d, depth=%d\n", i, height);
return i;
}
__device__ void passBasicBlock(int tmp /*pointer to block name*/, int action, int sline, int scolm, void* p_stackzone)
{
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;
int map = __ballot(1);
//int map = __activemask();
int numActive = __popc(map);
if ( buffer_oN_DeViCe == NULL)
return;
if (numActive==32)
{
//then choose one thread to write numbers
int tid = threadIdx.x + threadIdx.y *blockDim.x;
if (tid%32==0)
{
//do the writing
// printf("I will write for my warp tid=(%d, %d)\n", threadIdx.x, threadIdx.y);
int bid = atomicAdd(&bbccnntt, 1);
unsigned long long key=0;
BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe;
bblog[bid].key = key;
bblog[bid].tidx = (short)threadIdx.x;
bblog[bid].tidy = (short)threadIdx.y;
bblog[bid].bidx = (short)blockIdx.x;
bblog[bid].bidy = (short)blockIdx.y;
bblog[bid].sline = sline;
bblog[bid].scolm = scolm;
bblog[bid].cid = getContextID(p_stackzone);
}
}
else
{
//every thread needs to write
//printf("I will write for my self tid=(%d, %d)\n", threadIdx.x, threadIdx.y);
int bid = atomicAdd(&bbccnntt, 1);
unsigned long long key=0;
BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe;
bblog[bid].key = key;
bblog[bid].tidx = (short)threadIdx.x;
bblog[bid].tidy = (short)threadIdx.y;
bblog[bid].bidx = (short)blockIdx.x;
bblog[bid].bidy = (short)blockIdx.y;
bblog[bid].sline = sline;
bblog[bid].scolm = scolm;
bblog[bid].cid = getContextID(p_stackzone);
}
return;
}
/*
__device__ void passBasicBlock(int tmp , int action, int sline, int scolm, void* p_stackzone)
//__device__ void passBasicBlock(void* p , int action, int sline, int scolm, void* p_stackzone)
{
if ( buffer_oN_DeViCe == NULL)
return;
assert ( (bbccnntt < BUFFERSIZE/24 - 128) && "code: e317: too many entries to the buffer" ); //DO NOT COMMENT OUT
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;
// if (threadIdx.x + blockIdx.x + threadIdx.y + blockIdx.y == 0)
// getFuncID( (char*)p); //DEBUG
// printf("d: basic block: %s \ttid: (%d, %d)\n", str, threadIdx.x, threadIdx.y) ;
int bid = atomicAdd(&bbccnntt, 1);
if (bid > BUFFERSIZE/sizeof(BBlog_t) - 128) //overflow protection
return;
// for(int i=0; *(str+i) != 0; i++)
// {
// printf("%c", *(str+i) );
// }
// printf("\n");
unsigned long long key=0;
int cnt = 0;
long long factor = 1;
char* str = (char*)p;
for(int i=0; *(str+i) != 0; i++)
{
int ascii = (int)(*(str+i)) ;
if (ascii<48 || ascii > 123)
continue;
key += ascii*factor;
factor *= CONSTANCE;
// printf("%d\t", (int)(*(str+i)) );
// printf("key of %s is \t %llu\n", str, key);
}
// printf("key of %s is \t %llu\n", str, key);
// printf("\n");
BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe;
// bblog[bid].key = key;
bblog[bid].tidx = (short)threadIdx.x;
bblog[bid].tidy = (short)threadIdx.y;
bblog[bid].bidx = (short)blockIdx.x;
bblog[bid].bidy = (short)blockIdx.y;
bblog[bid].sline = sline;
bblog[bid].scolm = scolm;
// bblog[bid].cid = getContextID(p_stackzone);
//printf("d:: context ID: %d\n", bblog[bid].cid);
// if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 )
// printf("d:: context ID= %d\n", bblog[bid].cid);
return;
}
*/
__device__ void storeLines(void* p, short size/*bytes*/, short line, short colmn, short op /*load or store*/, void* p_stackzone)
{
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;
int map = __ballot(1);
//int map = __activemask();
int numActive = __popc(map);
if ( ccnntt > (int)(((long)BUFFERSIZE)/24) - 128*100)
return; //DEBUG
assert ( (ccnntt < BUFFERSIZE/24 - 128) && "code: e31: too many entries to the buffer"); //DO NOT COMMENT OUT
//d_trace[bid].bidx = blockIdx.x;
//d_trace[bid].tidx = threadIdx.x;
//d_trace[bid].ea = p;
//d_trace[bid].bytes = size;
//printf(" d : bid = %d from (%d,%d) (%d,%d) \n", bid, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y);
//assert ( (buffer_oN_DeViCe!=NULL) && "buffer_oN_DeViCe is null");
if (buffer_oN_DeViCe==NULL)
return;
int bid = atomicAdd(&ccnntt, 1);
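// Record layout written below: each trace entry occupies 24 bytes (12 shorts) at
// index bid*12 + k:
// shorts 0-3 : blockIdx.x/y and threadIdx.x/y
// shorts 4-7 : the 64-bit effective address, stored via buffer_oN_DeViCe_long[bid*3+1]
// shorts 8-11: access size in bytes, source line, source column, load/store flag
// This 24-byte stride is what the BUFFERSIZE/24 capacity checks above refer to.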
//printf ("%d\n", bid);
//__syncthreads(); //IMPORTANT to sync here
if(true)
{
int tid = threadIdx.x + threadIdx.y *blockDim.x;
//if ( tid%32==0 || true)
if (true)
{
short* buffer_oN_DeViCe_short = (short*) buffer_oN_DeViCe;
long* buffer_oN_DeViCe_long = (long*) buffer_oN_DeViCe;
buffer_oN_DeViCe_short[bid*12+0] = (short)blockIdx.x;
buffer_oN_DeViCe_short[bid*12+1] = (short)blockIdx.y;
buffer_oN_DeViCe_short[bid*12+2] = (short)threadIdx.x;
buffer_oN_DeViCe_short[bid*12+3] = (short)threadIdx.y;
buffer_oN_DeViCe_long[bid*3+1] = (long)p;
buffer_oN_DeViCe_short[bid*12+8] = size;
buffer_oN_DeViCe_short[bid*12+9] = line;
buffer_oN_DeViCe_short[bid*12+10] = colmn;
buffer_oN_DeViCe_short[bid*12+11] = op;
getContextID(p_stackzone);
}
}
}
/*
__device__ void dumpLines(void)
{
if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return;
int ii;
for(ii=1; ii< ccnntt; ii=ii+6)
{
// printf("d: %d Bytes at %p by (%d, %d)\n", buffer_oN_DeViCe[ii*6+4], buffer_oN_DeViCe[ii*6+2], buffer_oN_DeViCe[ii*6], buffer_oN_DeViCe[ii*6+1] );
}
// printf("\n" );
// const char* ss = "this is the end";
// void *ps = ss;
// takeString(ps);
//Or, this also works.
// char s[200] = "this is the end"; void *ps = &(s); takeString(s);
// printf("try mmap\n" );
// void* ptr = mmap(NULL, 1000, PROT_READ | PROT_WRITE| PROT_EXEC, MAP_SHARED, -1, 0);
// printf("%p\n",ptr );
}
*/
__device__ void print1(int a)
{
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0)
printf("d: print1: %d\n", a);
return;
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE)
{
if (a==1)
printf("d: load by CTA (%d,%d)\n", blockIdx.x, blockIdx.y);
else if (a==2)
printf("d: store by CTA (%d,%d)\n", blockIdx.x, blockIdx.y);
else
printf("d: !!! undefined !!! \n" );
}
}
/*
__device__ void print2()
{
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE)
printf("d: store by CTA (%d,%d)\n", blockIdx.x, blockIdx.y);
}
*/
__device__ void print3(int line, int col)
{
return;
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE)
printf("d: source line: %d\t column: %d by CTA (%d,%d)\n", line, col, blockIdx.x, blockIdx.y);
}
__device__ void print4(void* p)
{
//if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE)
printf("d: print4: %p\n", p);
}
__device__ void print5(void* p, int bits, int sline, int scolm, int op, void* p_stackzone)
{
// if ( (blockIdx.x + blockIdx.y* gridDim.x) * (blockDim.x * blockDim.y) >= 32*128) // no more than 128 warps
// return;
// printf("d: ea: %p by (%d,%d) (%d,%d), CTA id = %d\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y , (blockIdx.x + blockIdx.y* gridDim.x));
/*if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;*/
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int gtid = tid + blockDim.x * blockDim.y * bid;
Entry_t* entry = (Entry_t*) p;
storeLines(p, (short)(bits/8), (short)sline, (short) scolm, (short)op, p_stackzone);
// printf("d: ea: %p by (%d,%d) (%d,%d), CTA id = %d\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y , (blockIdx.x + blockIdx.y* gridDim.x));
// printf("d: ea: %p by (%d,%d) (%d,%d)\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y );
}
////
__device__ void RetKernel(void* p_stackzone)
{
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int tid = threadIdx.x + threadIdx.y *blockDim.x;
__syncthreads(); //IMPORTANT to sync here
int rank = -1;
if ( tid == 0)
{
// printf(" CTA\t%d\treleases:\t%p\n", bid, stackzone);
// atomicAdd( &alive, -1);
if (p_stackzone!=NULL)
{
free(p_stackzone);
rank = atomicAdd( &aliveCTA, -1);
printf("CTA\t%d\texits, total remains\t%d\n", bid, rank);
}
else
printf("d:: p_stack is hacked!!\n");
}
__syncthreads();
if (threadIdx.x + threadIdx.y == 0 && rank ==1 )
{
printf("d: in RetKernel...\n");
// for (int kk=0; kk< cHeight; kk++)
// cxtprint( kk );
if (true)
{ //memory
short* buffer_oN_DeViCe_short = (short*) buffer_oN_DeViCe;
buffer_oN_DeViCe_short[0+0] = blockDim.x; // Be consistent with print.cpp, dumpTrace()
buffer_oN_DeViCe_short[0+1] = blockDim.y;
buffer_oN_DeViCe_short[0+2] = gridDim.x;
buffer_oN_DeViCe_short[0+3] = gridDim.y;
printf("d: Kernel Returns: collected [ %llu ] memory entries. \n" , ccnntt);
printf("d: Kernel Returns: collected [ %llu ] memory entries. \n" , bbccnntt);
long* buffer_oN_DeViCe_long = (long*) buffer_oN_DeViCe;
buffer_oN_DeViCe_long[0+1] = ccnntt;
}
else
{ //branch
BBlog_t* bbbuffer_oN_DeViCe_short = (BBlog_t*) buffer_oN_DeViCe;
bbbuffer_oN_DeViCe_short[0].bidx = blockDim.x; // Be consistent with print.cpp, dumpTrace()
bbbuffer_oN_DeViCe_short[0].bidy = blockDim.y;
bbbuffer_oN_DeViCe_short[0].tidx = gridDim.x;
bbbuffer_oN_DeViCe_short[0].tidy = gridDim.y;
bbbuffer_oN_DeViCe_short[0].key = bbccnntt;
bbbuffer_oN_DeViCe_short[0].sline = 0;
bbbuffer_oN_DeViCe_short[0].scolm = 0;
printf("d: Kernel Returns: collected [ %llu ] BB logs. \n" , bbccnntt);
printf("d: Kernel Returns: collected [ %llu ] BB logs. \n" , ccnntt);
}
unsigned long offset1 = ((UNIQUE_FUNC_DEVICE* FUNC_NAME_LEN*sizeof(char))/1024+1)*1024;
unsigned long offset2 = ((TOTAL_NUMBER_CONTEXT * CALL_PATH_LEN_DEVICE* sizeof(CallSite_t))/1024+1)*1024 + offset1;
printf("size of function dic: %d %d %lu -> %lu , rounded to %lu\n", UNIQUE_FUNC_DEVICE, FUNC_NAME_LEN, sizeof(char), UNIQUE_FUNC_DEVICE*FUNC_NAME_LEN*sizeof(char), offset1 );
printf("size of context dic: %d %d %lu -> %lu , rounded to %lu\n", TOTAL_NUMBER_CONTEXT, CALL_PATH_LEN_DEVICE, sizeof(CallSite_t), TOTAL_NUMBER_CONTEXT* CALL_PATH_LEN_DEVICE* sizeof(CallSite_t) , offset2);
//function dic is the last,
//context dic is second to last
void* ptr;
ptr = (void*)( buffer_oN_DeViCe + (BUFFERSIZE - offset1)/sizeof(int)) ; //operate on a int*, not a void*
memcpy( ptr, funcDic, UNIQUE_FUNC_DEVICE *FUNC_NAME_LEN*sizeof(char) );
ptr = (void*)(buffer_oN_DeViCe + (BUFFERSIZE - offset2)/sizeof(int)) ; //operate on a int*, not a void*
memcpy( ptr, contextDic, TOTAL_NUMBER_CONTEXT * CALL_PATH_LEN_DEVICE*sizeof(CallSite_t) );
/* BBlog_t* tmpbb = (BBlog_t*) buffer_oN_DeViCe;
for (int i=1; i<bbccnntt; i++)
{
printf(" %d\t", tmpbb[i].bidx);
printf(" %d\t", tmpbb[i].bidy);
printf(" %d\t", tmpbb[i].tidx);
printf(" %d\t", tmpbb[i].tidy);
printf(" %llu\t", tmpbb[i].key);
printf(" %d\t", tmpbb[i].sline);
printf(" %d\t", tmpbb[i].scolm);
printf("\n");
}
*/
/*
Entry_t* tp = (Entry_t*) buffer_oN_DeViCe;
int i;
for (i=1; i<ccnntt; i++)
{
printf(" d: bid (%d,%d) \ttid (%d,%d) \t%p\t%d,%d\t%d\n", tp[i].bidx, tp[i].bidy, tp[i].tidx, tp[i].tidy, tp[i].ea, tp[i].sline, tp[i].scolm, tp[i].op);
}
*/
ccnntt = 1; //reset, prepares for next kernel call
bbccnntt = 1; //reset, prepares for next kernel call
}//end of if
}
|
dfa2efa677b366214fe788d2a13832dd67bf884f.cu
|
#include<sys/mman.h>
#include<assert.h>
#include<iostream>
#include<string.h>
#include "../common.h"
#include "types.h"
//#include<unordered_map>
//#include<cuda.h>
__device__ int CTALB = 0; //the lower bound of CTA id you want to profile
__device__ int CTAUB = 99999; //the upper bound of CTA id you want to profile
__device__ int CONSTANCE = 128;
__device__ int aliveCTA = 0;
//__device__ std::unordered_map< std::string, long> blockmap;
//__device__ std::vector<int> testt; //this DOESN'T work //"dynamic initialization doesn't work for __device__
__device__ bool VERBOSE=false;
__device__ bool CALLPATHVERBOSE=false;
extern "C"
{ //so that no mangling for function names
__device__ void takeString(void* , int);
__device__ void RetKernel(void*);
__device__ void passBasicBlock(int, int, int, int, void*);
__device__ void print5(void*, int, int, int, int, void*);
__device__ void print4(void*);
__device__ void callFunc(void* , void* , int , int, void*);
__device__ int getContextID(void*);
__device__ void* InitKernel(void*);
__device__ void print1(int);
}
__device__ unsigned long long ccnntt = 1; //the very first element is reserved for metadata
__device__ unsigned long long bbccnntt = 1; //the very first element is reserved for metadata
__device__ int* buffer_oN_DeViCe; //should be multiples of 6
//__device__ int* globalCallStack;
//__device__ CallSite_t* globalCallStack;
//__device__ int* stackHeight;
__device__ char funcDic[UNIQUE_FUNC_DEVICE][FUNC_NAME_LEN]; //maintains 100 unique functions and 31 chars for each
__device__ int dicHeight = 0; // size of funcDic[][]
__device__ CallSite_t contextDic[TOTAL_NUMBER_CONTEXT][CALL_PATH_LEN_DEVICE]; //maintains 100 unique contexts, each holding up to 10 call sites
__device__ int cHeight = 0;
#ifdef PER_THREAD
__device__ bool per_thread_trace_d = true;
#else
__device__ bool per_thread_trace_d = false;
#endif
/*
#define MAX_NUM_CTAS 1024
__device__ volatile int arrIn[MAX_NUM_CTAS];//for inter-CTA sync
__device__ volatile int arrOut[MAX_NUM_CTAS];//for inter-CTA sync
__device__ void __sync_ctas(int goalVal) //, volatile int *arrIn, volatile int *arrOut)
{
// assuming there the number of threads/CTA is greater than the number of CTAs in the entire grid
// otherwise, dead loop
// this assumption hurts
int nBlockNum = gridDim.x * gridDim.y;
int bid = blockIdx.x* gridDim.y + blockIdx.y;
int tid = threadIdx.x * blockDim.y + threadIdx.y;
if (threadIdx.x + threadIdx.y ==0)
arrIn[bid] = goalVal;
if (bid==1)
{
if ( tid < nBlockNum)
{
while( arrIn[tid] != goalVal)
{}
}
__syncthreads();
if (tid < nBlockNum)
arrOut[tid] = goalVal;
}
if (tid==0)
while (arrOut[bid]!=goalVal)
{}
__syncthreads();
if ( tid==0 )
printf("d: CTA %d sync-ed\n", bid);
}
*/
/*
//this is from a published paper.
//but it doesn't work.
// probably because only one CTA is allowed on one SM, otherwise errors.
__device__ void __sync_ctas(int goalVal)
{
if (threadIdx.x + threadIdx.y ==0)
{
int id = atomicAdd( (int*)&g_mutex,1);
printf("d: CTA (%d, %d) got id=%d, goal is %d\n", blockIdx.x, blockIdx.y, id, goalVal);
return;
while (g_mutex != goalVal)
{}//busy wait
}
__syncthreads();
}
*/
__device__ void mystrcpy(char* dst, char* src)
{
int cnt = 0;
while ( src[cnt] != '\0' && cnt < FUNC_NAME_LEN-1) //never exceeds this 30 limit
{
dst[cnt] = src[cnt];
cnt++;
}
dst[cnt] = '\0';
return;
}
__device__ bool mystrcmp(char* dst, char* src)
{
int cnt = 0;
while ( cnt < FUNC_NAME_LEN-1 ) //never exceeds this 30 limit
{
if ( dst[cnt] == '\0' && src[cnt] == '\0')
return true;
if (dst[cnt] != src[cnt])
return false;
cnt++;
}
return true;
}
__device__ int getFuncID(char* func)
{
if (dicHeight == 0 ) //the very first function
{
mystrcpy(funcDic[0], func);
// printf("src: %s\n", func);
// printf("dst: %s\n", funcDic[0]);
dicHeight ++;
return 0;
}
// printf("d: height = %d\n", dicHeight);
for(int i=0; i < dicHeight; i++)
{
bool found = mystrcmp( funcDic[i], func );
// printf("d:: compare this pair: %s: \t%s \tVS\t %s\n", found?"yes":"no", funcDic[i], func);
if(found)
return i;
}
//return -1;//DEBUG
//if you are here, means we have a new func
mystrcpy(funcDic[dicHeight], func);
dicHeight ++;
return dicHeight-1;
}
__device__ void updateCallStack(int caller, int callee, short sline, short scolm, int bid, int tid, void* p_stackzone)
{
int offset = bid*blockDim.x*blockDim.y+tid;
// CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE]));
// int &height = stackHeight[offset];
CallSite_t* callStack = (CallSite_t*) p_stackzone;
int bytesPerThread = (CALL_PATH_LEN_DEVICE*sizeof(CallSite_t));
int* temp = (int*)( (char*)p_stackzone + bytesPerThread+16); //offset by 16 to be safe, need to be consistent
int &height = *temp;
// int &h11 = * (int*)( (char*)p_stackzone + bytesPerThread);
// if(CALLPATHVERBOSE)
// printf( ":::::::: height = %d :::::::::\n", height);
// assert(height != 1 && "stack height != 1") ;
//return;//DUBUG
if (height==0)
{
// if (CALLPATHVERBOSE)
// printf("first ever. tid=%d\n", tid);
callStack[0].id = caller;
callStack[0].sline = sline;
callStack[0].scolm = scolm;
callStack[1].id = callee;
callStack[1].sline = -1;
callStack[1].scolm = -1;
height=2;
return;
}
int p_caller = callStack[height-2].id;
int p_callee = callStack[height-1].id;
if ( p_caller == caller && p_callee == callee)
{ //repeated call
// if (CALLPATHVERBOSE)
// printf("repeated call\n");
callStack[height-2].sline = sline;
callStack[height-2].scolm = scolm;
return;
}
else if ( p_caller == caller && p_callee != callee)
{ //the same parent called a different function, simply update the callee
// if (CALLPATHVERBOSE)
// printf("same caller different callee\n");
callStack[height-1].id = callee;
callStack[height-2].sline = sline;
callStack[height-2].scolm = scolm;
return;
}
else if ( p_callee == caller)
{ // a typical call path
// if (CALLPATHVERBOSE)
// printf("call sequence\n");
callStack[height-1].sline = sline;
callStack[height-1].scolm = scolm;
callStack[height].id = callee;
callStack[height].sline = -1;
callStack[height].scolm = -1;
height++;
return;
}
// return;//DUBUG
// if (CALLPATHVERBOSE)
// printf("the caller exists deeply in the stack\n");
// the caller exists deeply in the stack
for (int i=height-1; i>=0; i--)
{
if ( callStack[i].id == caller)
{
height = i+1;
callStack[i].id = callee;
callStack[i].sline = -1;
callStack[i].scolm = -1;
callStack[i].sline = sline;
callStack[i].scolm = scolm;
return;
}
}
// the caller exists deeply in the stack
// assert( (0==-1) && "!! undefined things happeened here\n");
}
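// Illustrative walk-through of the branches above (hypothetical function ids
// A=0, B=1, C=2, D=3; the concrete sequence is not part of the original instrumentation):
//
//   updateCallStack(A,B,..)  -> stack [A,B],   height 2   (first ever call)
//   updateCallStack(B,C,..)  -> stack [A,B,C], height 3   (typical call sequence)
//   updateCallStack(B,C,..)  -> stack unchanged, call site updated  (repeated call)
//   updateCallStack(B,D,..)  -> stack [A,B,D], height 3   (same caller, new callee)
//   updateCallStack(A,D,..)  -> unwinds to A's frame and stores D there, height 1
//                               (caller found deeper in the stack)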
/*
__device__ void printCallStack(int bid, int tid)
{
int offset = bid*blockDim.x*blockDim.y+tid;
CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE]));
int height = stackHeight[offset];
printf(" d::: current call stack height: %d @ bid = %d, tid = %d = (%d,%d,%d,%d)\n", height, bid, tid, threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
if (height<1)
return;
for (int i=0; i<height; i++)
printf(" %d: call site: %d, (%d, %d)\n", i, callStack[i].id, callStack[i].sline, callStack[i].scolm );
}
*/
__device__ void* InitKernel(void* ptrhead)
{
//TODO:
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return NULL;
int tid = threadIdx.x + threadIdx.y *blockDim.x;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int global_tid = tid + bid*blockDim.x*blockDim.y;
int num_cta = gridDim.x*gridDim.y;
int num_thread = blockDim.x*blockDim.y;
__shared__ char* handler; //this pointer is for maintaing stack/callpath
__syncthreads();
int bytesPerThread = sizeof(CallSite_t)*CALL_PATH_LEN_DEVICE + 32;// I put 32 just to be safe
if ( tid ==0 )
{
handler = (char*) malloc( blockDim.x*blockDim.y*bytesPerThread);
assert( handler!=NULL);
// printf(" CTA \t%d\tgrabs memroy\t%p\n", bid, handler);
int rank = atomicAdd( &aliveCTA, 1);
printf(" CTA\t%d\tonline, total alive\t%d\n", bid, rank);
if (rank==0)
{
// if (tid%32==0)
{
// buffer_oN_DeViCe = (int*)ptrhead;
printf("\nd: InitKernel...\n");
printf("d: buffer pointer: %p\n", buffer_oN_DeViCe);
printf("d: size of kernel grid: %d, %d\t%d, %d\n", gridDim.x, gridDim.y, blockDim.x, blockDim.y);
}
}
//if (rank == 1)
}
buffer_oN_DeViCe = (int*)ptrhead;
__syncthreads();
void* stackzone = (void*)( handler + bytesPerThread*tid );
// if (tid==1)
// {
// stackHeight = (int*) ptr3;
// globalCallStack = (CallSite_t*)ptr2;
// buffer_oN_DeViCe = (int*)ptrhead;
//}
// if (tid ==0)
// printf("d: DEBUG: here1 from CTA %d\n", bid);
return stackzone;
// if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0)
// vunlunteer to do the initialization
/*
done2 = atomicAdd(&done2, 1);
if ( stackHeight==NULL && done2 == 2)
{
printf("I will 2nd malloc() %ld by (%d, %d)\n", numthreads*sizeof(int) , bid, tid);
stackHeight = (int*)malloc( sizeof(int) * numthreads);
printf(" 2nd malloc() done by (%d, %d)\n", bid, tid);
}
else{ holdon( bid*10000); }
__syncthreads();
holdon(bid*10000);
// done1 = atomicAdd(&done1, 1);
// if ( globalCallStack==NULL && tid==0)
// {
// printf("I will malloc() %ld by (%d, %d)\n", numthreads*sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE ,bid, tid);
// globalCallStack = (CallSite_t**) malloc(sizeof(CallSite_t*) * numthreads);
//
// }
if ( globalCallStack[global_tid] ==NULL)
{ //DEBUG: there are still repeated allocation from the SAME thread
globalCallStack[global_tid] = (CallSite_t*) malloc(UNIQUE_FUNC_DEVICE* sizeof(CallSite_t) );
printf("I do it by myself %ld @ %p by (%d, %d)=%d\n", sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE, globalCallStack[global_tid], bid, tid, global_tid);
}
__syncthreads();
stackHeight[global_tid] = 0;
__syncthreads();
printf("__ back from InitKernel: %d, %d\n", bid, tid);
*/ //Du: July 10
/* else
{ //wait and see
int cnt = 0;
while ( globalCallStack==NULL )
cnt++;
while ( stackHeight==NULL)
cnt++;
}
*/
/*
int mask = __ballot(1);
int leader = __ffs(mask)-1;
if( globalCallStack==NULL && leader == threadIdx.x%32)
{
long numthreads = gridDim.x*gridDim.y*blockDim.x*blockDim.y*32;
printf("I will malloc() %ld by (%d, %d)\n", numthreads*sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE , threadIdx.x, threadIdx.y);
globalCallStack = (CallSite_t**) malloc(sizeof(CallSite_t*) * numthreads);
for( int i = 0; i<numthreads; i++)
globalCallStack[i] = (CallSite_t*) malloc(UNIQUE_FUNC_DEVICE* sizeof(CallSite_t) );
stackHeight = (int*)malloc( sizeof(int) * numthreads);
for (int i=0; i<numthreads; i++)
stackHeight[i] = 0;
}
*/
}
__device__ void callFunc(void* er, void* ee, int sline, int scolm, void* p_stackzone)
{
// if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return; //DEBUG
// printf("d::%d\n", sline );
// printf("d::%s\n", (char*)er );
// if (CALLPATHVERBOSE)
// printf("d:::: >>>>\n");
int id1 = getFuncID( (char*)er );
int id2 = getFuncID( (char*)ee );
// if (CALLPATHVERBOSE)
// {
// printf("d:::: ID: %d :%s\n", id1, (char*)er );
// printf("d:::: ID: %d :%s\n", id2, (char*)ee );
// }
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int global_tid = bid * (blockDim.x * blockDim.y) + tid;
updateCallStack(id1, id2, (short) sline, (short) scolm, bid, tid, p_stackzone);
// printCallStack(global_tid);
// if (CALLPATHVERBOSE)
// printf("d:::: <<<<\n");
}
/*
__device__ void takeString(void* p, int action)
{
if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return;
if (VERBOSE)
{
if (action==1)
printf("d: caller: %s\n",(char*)p);
else if (action==2)
printf("d: callee: %s\n",(char*)p);
else if (action==3)
printf("d: return: %s\n",(char*)p);
else
printf("d: undefined: %s\n",(char*)p);
}
return;
}
*/
__device__ void cxtprint(int id)
{
if (id<0)
return;
printf("d::: requested context id: %d out of %d\n", id, cHeight);
for (int i = 0; i< CALL_PATH_LEN_DEVICE && contextDic[id][i].id != -1 ; i++)
{
printf("d::::::: current context [%d][%d]: %d, %d, %d\n", id, i, contextDic[id][i].id, contextDic[id][i].sline, contextDic[id][i].scolm) ;
}
return;
}
__device__ void cxtcpy( CallSite_t* dst, CallSite_t* src , int height) //context copy
{
int i;
for( i=0; i< height; i++)
dst[i] = src[i];
// assert(i<CALL_PATH_LEN_DEVICE && "code: e56: call stack too deep");
dst[i].id = -1; //to mark the ending of one context
return;
}
__device__ bool cxtcmp( CallSite_t* dst, CallSite_t* src, int height)
{
for( int i=0; i< height; i++)
if ( dst[i].id == src[i].id ) // && dst[i].id == src[i].id &&
continue;
else
return false;
return true;
}
__device__ int getContextID(void* p_stackzone)
{ //shared by all threads, so there are races
//you could manually serialize access here if the races become a problem
// if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y != 0 ) return -2; //DEBUG
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int tid = threadIdx.y * blockDim.x + threadIdx.x;
// int offset = bid*blockDim.x*blockDim.y+tid;
// CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE]));
// int &height = stackHeight[offset];
CallSite_t* callStack = (CallSite_t*) p_stackzone;
int bytesPerThread = (CALL_PATH_LEN_DEVICE*sizeof(CallSite_t));
int* temp = (int*)( (char*)p_stackzone + bytesPerThread+16); //offset by 16 to be safe, must stay consistent with updateCallStack()
int &height = *temp;
if ( height ==0) //it is possible that call stack is still empty
return -1;
if (cHeight==0)// the first ever context in the dic
{
// if (CALLPATHVERBOSE)
// printf("d::: the very first context in dic, depth=%d\n", height);
cxtcpy(contextDic[0], callStack, height );
cHeight=1;
return 0;
}
// something already exists
// if (CALLPATHVERBOSE)
// {
// printf("d::: going to match existing items in context dic\n");
// printf("d::: number of existing contexts: %d\n", cHeight);
// }
int i;
for (i = 0; i<cHeight; i++)
{
if ( cxtcmp( contextDic[i], callStack, height ) ) //yes, found
{
// if (CALLPATHVERBOSE)
// printf("d::: matched, returning %d, depth=%d\n",i, height);
return i;
}
}
// if (CALLPATHVERBOSE)
// printf("d::: not found, value of i: %d\n", i);
// assert (i< TOTAL_NUMBER_CONTEXT && "code:e34: Not enough space for Context Dic, index i");
// printCallStack();
cxtcpy(contextDic[i], callStack, height );
cHeight = i+1;
// assert (cHeight < TOTAL_NUMBER_CONTEXT && "code:e41: Not enough space for Context Dic, cHeight");
// if (CALLPATHVERBOSE)
// printf("d::: inserted new one: id = %d, depth=%d\n", i, height);
return i;
}
__device__ void passBasicBlock(int tmp /*pointer to block name*/, int action, int sline, int scolm, void* p_stackzone)
{
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;
int map = __ballot(1);
//int map = __activemask();
int numActive = __popc(map);
if ( buffer_oN_DeViCe == NULL)
return;
if (numActive==32)
{
//then choose one thread to write numbers
int tid = threadIdx.x + threadIdx.y *blockDim.x;
if (tid%32==0)
{
//do the writing
// printf("I will write for my warp tid=(%d, %d)\n", threadIdx.x, threadIdx.y);
int bid = atomicAdd(&bbccnntt, 1);
unsigned long long key=0;
BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe;
bblog[bid].key = key;
bblog[bid].tidx = (short)threadIdx.x;
bblog[bid].tidy = (short)threadIdx.y;
bblog[bid].bidx = (short)blockIdx.x;
bblog[bid].bidy = (short)blockIdx.y;
bblog[bid].sline = sline;
bblog[bid].scolm = scolm;
bblog[bid].cid = getContextID(p_stackzone);
}
}
else
{
//every thread needs to write
//printf("I will write for my self tid=(%d, %d)\n", threadIdx.x, threadIdx.y);
int bid = atomicAdd(&bbccnntt, 1);
unsigned long long key=0;
BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe;
bblog[bid].key = key;
bblog[bid].tidx = (short)threadIdx.x;
bblog[bid].tidy = (short)threadIdx.y;
bblog[bid].bidx = (short)blockIdx.x;
bblog[bid].bidy = (short)blockIdx.y;
bblog[bid].sline = sline;
bblog[bid].scolm = scolm;
bblog[bid].cid = getContextID(p_stackzone);
}
return;
}
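// Sketch of the logging policy above (restated for clarity; "emits" is shorthand,
// not an actual function in this file):
//
//   int mask = __ballot(1);        // bitmask of lanes that reached this basic block
//   if (__popc(mask) == 32)        // fully convergent warp:
//       lane 0 (tid%32==0) does one atomicAdd on bbccnntt and emits a single BBlog_t
//   else                           // divergent warp:
//       every active lane emits its own BBlog_t record
//
// so a fully convergent warp costs 1 log entry instead of 32.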
/*
__device__ void passBasicBlock(int tmp , int action, int sline, int scolm, void* p_stackzone)
//__device__ void passBasicBlock(void* p , int action, int sline, int scolm, void* p_stackzone)
{
if ( buffer_oN_DeViCe == NULL)
return;
assert ( (bbccnntt < BUFFERSIZE/24 - 128) && "code: e317: too many entries to the buffer" ); //DO NOT COMMENT OUT
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;
// if (threadIdx.x + blockIdx.x + threadIdx.y + blockIdx.y == 0)
// getFuncID( (char*)p); //DEBUG
// printf("d: basic block: %s \ttid: (%d, %d)\n", str, threadIdx.x, threadIdx.y) ;
int bid = atomicAdd(&bbccnntt, 1);
if (bid > BUFFERSIZE/sizeof(BBlog_t) - 128) //overflow protection
return;
// for(int i=0; *(str+i) != 0; i++)
// {
// printf("%c", *(str+i) );
// }
// printf("\n");
unsigned long long key=0;
int cnt = 0;
long long factor = 1;
char* str = (char*)p;
for(int i=0; *(str+i) != 0; i++)
{
int ascii = (int)(*(str+i)) ;
if (ascii<48 || ascii > 123)
continue;
key += ascii*factor;
factor *= CONSTANCE;
// printf("%d\t", (int)(*(str+i)) );
// printf("key of %s is \t %llu\n", str, key);
}
// printf("key of %s is \t %llu\n", str, key);
// printf("\n");
BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe;
// bblog[bid].key = key;
bblog[bid].tidx = (short)threadIdx.x;
bblog[bid].tidy = (short)threadIdx.y;
bblog[bid].bidx = (short)blockIdx.x;
bblog[bid].bidy = (short)blockIdx.y;
bblog[bid].sline = sline;
bblog[bid].scolm = scolm;
// bblog[bid].cid = getContextID(p_stackzone);
//printf("d:: context ID: %d\n", bblog[bid].cid);
// if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 )
// printf("d:: context ID= %d\n", bblog[bid].cid);
return;
}
*/
__device__ void storeLines(void* p, short size/*bytes*/, short line, short colmn, short op /*load or store*/, void* p_stackzone)
{
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;
int map = __ballot(1);
//int map = __activemask();
int numActive = __popc(map);
if ( ccnntt > (int)(((long)BUFFERSIZE)/24) - 128*100)
return; //DEBUG
assert ( (ccnntt < BUFFERSIZE/24 - 128) && "code: e31: too many entries to the buffer"); //DO NOT COMMENT OUT
//d_trace[bid].bidx = blockIdx.x;
//d_trace[bid].tidx = threadIdx.x;
//d_trace[bid].ea = p;
//d_trace[bid].bytes = size;
//printf(" d : bid = %d from (%d,%d) (%d,%d) \n", bid, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y);
//assert ( (buffer_oN_DeViCe!=NULL) && "buffer_oN_DeViCe is null");
if (buffer_oN_DeViCe==NULL)
return;
int bid = atomicAdd(&ccnntt, 1);
//printf ("%d\n", bid);
//__syncthreads(); //IMPORTANT to sync here
if(true)
{
int tid = threadIdx.x + threadIdx.y *blockDim.x;
//if ( tid%32==0 || true)
if (true)
{
short* buffer_oN_DeViCe_short = (short*) buffer_oN_DeViCe;
long* buffer_oN_DeViCe_long = (long*) buffer_oN_DeViCe;
buffer_oN_DeViCe_short[bid*12+0] = (short)blockIdx.x;
buffer_oN_DeViCe_short[bid*12+1] = (short)blockIdx.y;
buffer_oN_DeViCe_short[bid*12+2] = (short)threadIdx.x;
buffer_oN_DeViCe_short[bid*12+3] = (short)threadIdx.y;
buffer_oN_DeViCe_long[bid*3+1] = (long)p;
buffer_oN_DeViCe_short[bid*12+8] = size;
buffer_oN_DeViCe_short[bid*12+9] = line;
buffer_oN_DeViCe_short[bid*12+10] = colmn;
buffer_oN_DeViCe_short[bid*12+11] = op;
getContextID(p_stackzone);
}
}
}
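// Record layout sketch (assumed to mirror the 24-byte Entry_t in types.h): each
// storeLines() record is addressed through two aliased views of the same slot:
//
//   short view, index bid*12 + k          long view, index bid*3 + k
//   ---------------------------           --------------------------
//   [0] blockIdx.x   [1] blockIdx.y       [1] effective address (void* p)
//   [2] threadIdx.x  [3] threadIdx.y
//   [8] size (bytes) [9] source line
//   [10] source col  [11] op (load/store)
//
// shorts [4..7] are occupied by the 8-byte address of the long view, which is why the
// indices 12*bid and 3*bid advance in lockstep.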
/*
__device__ void dumpLines(void)
{
if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return;
int ii;
for(ii=1; ii< ccnntt; ii=ii+6)
{
// printf("d: %d Bytes at %p by (%d, %d)\n", buffer_oN_DeViCe[ii*6+4], buffer_oN_DeViCe[ii*6+2], buffer_oN_DeViCe[ii*6], buffer_oN_DeViCe[ii*6+1] );
}
// printf("\n" );
// const char* ss = "this is the end";
// void *ps = ss;
// takeString(ps);
//Or, this also works.
// char s[200] = "this is the end"; void *ps = &(s); takeString(s);
// printf("try mmap\n" );
// void* ptr = mmap(NULL, 1000, PROT_READ | PROT_WRITE| PROT_EXEC, MAP_SHARED, -1, 0);
// printf("%p\n",ptr );
}
*/
__device__ void print1(int a)
{
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0)
printf("d: print1: %d\n", a);
return;
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE)
{
if (a==1)
printf("d: load by CTA (%d,%d)\n", blockIdx.x, blockIdx.y);
else if (a==2)
printf("d: store by CTA (%d,%d)\n", blockIdx.x, blockIdx.y);
else
printf("d: !!! undefined !!! \n" );
}
}
/*
__device__ void print2()
{
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE)
printf("d: store by CTA (%d,%d)\n", blockIdx.x, blockIdx.y);
}
*/
__device__ void print3(int line, int col)
{
return;
if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE)
printf("d: source line: %d\t column: %d by CTA (%d,%d)\n", line, col, blockIdx.x, blockIdx.y);
}
__device__ void print4(void* p)
{
//if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE)
printf("d: print4: %p\n", p);
}
__device__ void print5(void* p, int bits, int sline, int scolm, int op, void* p_stackzone)
{
// if ( (blockIdx.x + blockIdx.y* gridDim.x) * (blockDim.x * blockDim.y) >= 32*128) // no more than 128 warps
// return;
// printf("d: ea: %p by (%d,%d) (%d,%d), CTA id = %d\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y , (blockIdx.x + blockIdx.y* gridDim.x));
/*if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;*/
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int gtid = tid + blockDim.x * blockDim.y * bid;
Entry_t* entry = (Entry_t*) p;
storeLines(p, (short)(bits/8), (short)sline, (short) scolm, (short)op, p_stackzone);
// printf("d: ea: %p by (%d,%d) (%d,%d), CTA id = %d\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y , (blockIdx.x + blockIdx.y* gridDim.x));
// printf("d: ea: %p by (%d,%d) (%d,%d)\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y );
}
////
__device__ void RetKernel(void* p_stackzone)
{
if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs
return;
int bid = blockIdx.x + blockIdx.y * gridDim.x;
int tid = threadIdx.x + threadIdx.y *blockDim.x;
__syncthreads(); //IMPORTANT to sync here
int rank = -1;
if ( tid == 0)
{
// printf(" CTA\t%d\treleases:\t%p\n", bid, stackzone);
// atomicAdd( &alive, -1);
if (p_stackzone!=NULL)
{
free(p_stackzone);
rank = atomicAdd( &aliveCTA, -1);
printf("CTA\t%d\texits, total remains\t%d\n", bid, rank);
}
else
printf("d:: p_stack is hacked!!\n");
}
__syncthreads();
if (threadIdx.x + threadIdx.y == 0 && rank ==1 )
{
printf("d: in RetKernel...\n");
// for (int kk=0; kk< cHeight; kk++)
// cxtprint( kk );
if (true)
{ //memory
short* buffer_oN_DeViCe_short = (short*) buffer_oN_DeViCe;
buffer_oN_DeViCe_short[0+0] = blockDim.x; // Be consistent with print.cpp, dumpTrace()
buffer_oN_DeViCe_short[0+1] = blockDim.y;
buffer_oN_DeViCe_short[0+2] = gridDim.x;
buffer_oN_DeViCe_short[0+3] = gridDim.y;
printf("d: Kernel Returns: collected [ %llu ] memory entries. \n" , ccnntt);
printf("d: Kernel Returns: collected [ %llu ] memory entries. \n" , bbccnntt);
long* buffer_oN_DeViCe_long = (long*) buffer_oN_DeViCe;
buffer_oN_DeViCe_long[0+1] = ccnntt;
}
else
{ //branch
BBlog_t* bbbuffer_oN_DeViCe_short = (BBlog_t*) buffer_oN_DeViCe;
bbbuffer_oN_DeViCe_short[0].bidx = blockDim.x; // Be consistent with print.cpp, dumpTrace()
bbbuffer_oN_DeViCe_short[0].bidy = blockDim.y;
bbbuffer_oN_DeViCe_short[0].tidx = gridDim.x;
bbbuffer_oN_DeViCe_short[0].tidy = gridDim.y;
bbbuffer_oN_DeViCe_short[0].key = bbccnntt;
bbbuffer_oN_DeViCe_short[0].sline = 0;
bbbuffer_oN_DeViCe_short[0].scolm = 0;
printf("d: Kernel Returns: collected [ %llu ] BB logs. \n" , bbccnntt);
printf("d: Kernel Returns: collected [ %llu ] BB logs. \n" , ccnntt);
}
unsigned long offset1 = ((UNIQUE_FUNC_DEVICE* FUNC_NAME_LEN*sizeof(char))/1024+1)*1024;
unsigned long offset2 = ((TOTAL_NUMBER_CONTEXT * CALL_PATH_LEN_DEVICE* sizeof(CallSite_t))/1024+1)*1024 + offset1;
printf("size of function dic: %d %d %lu -> %lu , rounded to %lu\n", UNIQUE_FUNC_DEVICE, FUNC_NAME_LEN, sizeof(char), UNIQUE_FUNC_DEVICE*FUNC_NAME_LEN*sizeof(char), offset1 );
printf("size of context dic: %d %d %lu -> %lu , rounded to %lu\n", TOTAL_NUMBER_CONTEXT, CALL_PATH_LEN_DEVICE, sizeof(CallSite_t), TOTAL_NUMBER_CONTEXT* CALL_PATH_LEN_DEVICE* sizeof(CallSite_t) , offset2);
//function dic is the last,
//context dic is second to last
void* ptr;
ptr = (void*)( buffer_oN_DeViCe + (BUFFERSIZE - offset1)/sizeof(int)) ; //operate on a int*, not a void*
memcpy( ptr, funcDic, UNIQUE_FUNC_DEVICE *FUNC_NAME_LEN*sizeof(char) );
ptr = (void*)(buffer_oN_DeViCe + (BUFFERSIZE - offset2)/sizeof(int)) ; //operate on a int*, not a void*
memcpy( ptr, contextDic, TOTAL_NUMBER_CONTEXT * CALL_PATH_LEN_DEVICE*sizeof(CallSite_t) );
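// Resulting buffer layout (sketch; offsets are the rounded values computed above):
//
//   [slot 0: block/grid dims + count][trace records 1..ccnntt-1] ... unused ...
//   [context dic @ BUFFERSIZE-offset2][function dic @ BUFFERSIZE-offset1][end of buffer]
//
// records grow from the front while the two dictionaries are packed at the tail, so the
// host can recover records, contexts and function names from a single copy-back.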
/* BBlog_t* tmpbb = (BBlog_t*) buffer_oN_DeViCe;
for (int i=1; i<bbccnntt; i++)
{
printf(" %d\t", tmpbb[i].bidx);
printf(" %d\t", tmpbb[i].bidy);
printf(" %d\t", tmpbb[i].tidx);
printf(" %d\t", tmpbb[i].tidy);
printf(" %llu\t", tmpbb[i].key);
printf(" %d\t", tmpbb[i].sline);
printf(" %d\t", tmpbb[i].scolm);
printf("\n");
}
*/
/*
Entry_t* tp = (Entry_t*) buffer_oN_DeViCe;
int i;
for (i=1; i<ccnntt; i++)
{
printf(" d: bid (%d,%d) \ttid (%d,%d) \t%p\t%d,%d\t%d\n", tp[i].bidx, tp[i].bidy, tp[i].tidx, tp[i].tidy, tp[i].ea, tp[i].sline, tp[i].scolm, tp[i].op);
}
*/
ccnntt = 1; //reset, prepares for next kernel call
bbccnntt = 1; //reset, prepares for next kernel call
}//end of if
}
|
954289a8a8744aa29e88ca5da45e51e280657022.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_xvel_plus_2_back [3][2];
static int dims_update_halo_kernel2_xvel_plus_2_back_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_xvel_plus_2_back_gpu(ACC<double> &xvel0,
ACC<double> &xvel1,
const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0(0,0,0) = xvel0(0,0,2);
if(fields[FIELD_XVEL1] == 1) xvel1(0,0,0) = xvel1(0,0,2);
}
__global__ void ops_update_halo_kernel2_xvel_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_plus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_plus_2_back[0][0] * dims_update_halo_kernel2_xvel_plus_2_back[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_plus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_plus_2_back[1][0] * dims_update_halo_kernel2_xvel_plus_2_back[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_xvel_plus_2_back[0][0], dims_update_halo_kernel2_xvel_plus_2_back[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_xvel_plus_2_back[1][0], dims_update_halo_kernel2_xvel_plus_2_back[1][1], arg1);
update_halo_kernel2_xvel_plus_2_back_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,32)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(32,"update_halo_kernel2_xvel_plus_2_back");
OPS_kernels[32].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_xvel_plus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel2_xvel_plus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel2_xvel_plus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel2_xvel_plus_2_back_h[1][1]) {
dims_update_halo_kernel2_xvel_plus_2_back_h[0][0] = xdim0;
dims_update_halo_kernel2_xvel_plus_2_back_h[0][1] = ydim0;
dims_update_halo_kernel2_xvel_plus_2_back_h[1][0] = xdim1;
dims_update_halo_kernel2_xvel_plus_2_back_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_xvel_plus_2_back, dims_update_halo_kernel2_xvel_plus_2_back_h, sizeof(dims_update_halo_kernel2_xvel_plus_2_back)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
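// The base offsets above implement a row-major 3-D index:
//   base = base_offset + elem*( x0*sx + size0*( y0*sy + size1*( z0*sz ) ) )
// Worked example with assumed values (elem = dat0 = 8-byte double, dat size 102x102,
// unit stencil strides, start = (2,2,0)):
//   base0 = base_offset + 8*2 + 8*102*2 + 8*102*102*0 = base_offset + 1648 bytes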
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[32].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[32].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[32].mpi_time += t2-t1;
OPS_kernels[32].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[32].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 32;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 32;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(32,"update_halo_kernel2_xvel_plus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
954289a8a8744aa29e88ca5da45e51e280657022.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_xvel_plus_2_back [3][2];
static int dims_update_halo_kernel2_xvel_plus_2_back_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_xvel_plus_2_back_gpu(ACC<double> &xvel0,
ACC<double> &xvel1,
const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0(0,0,0) = xvel0(0,0,2);
if(fields[FIELD_XVEL1] == 1) xvel1(0,0,0) = xvel1(0,0,2);
}
__global__ void ops_update_halo_kernel2_xvel_plus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_plus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_plus_2_back[0][0] * dims_update_halo_kernel2_xvel_plus_2_back[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_plus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_plus_2_back[1][0] * dims_update_halo_kernel2_xvel_plus_2_back[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_xvel_plus_2_back[0][0], dims_update_halo_kernel2_xvel_plus_2_back[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_xvel_plus_2_back[1][0], dims_update_halo_kernel2_xvel_plus_2_back[1][1], arg1);
update_halo_kernel2_xvel_plus_2_back_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,32)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(32,"update_halo_kernel2_xvel_plus_2_back");
OPS_kernels[32].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_xvel_plus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel2_xvel_plus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel2_xvel_plus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel2_xvel_plus_2_back_h[1][1]) {
dims_update_halo_kernel2_xvel_plus_2_back_h[0][0] = xdim0;
dims_update_halo_kernel2_xvel_plus_2_back_h[0][1] = ydim0;
dims_update_halo_kernel2_xvel_plus_2_back_h[1][0] = xdim1;
dims_update_halo_kernel2_xvel_plus_2_back_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_xvel_plus_2_back, dims_update_halo_kernel2_xvel_plus_2_back_h, sizeof(dims_update_halo_kernel2_xvel_plus_2_back)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[32].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_xvel_plus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[32].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[32].mpi_time += t2-t1;
OPS_kernels[32].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[32].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_plus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 32;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 32;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_plus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(32,"update_halo_kernel2_xvel_plus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
2a336c14a95b3e9b23510d214719acea780f86ae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAImageUtil.h"
#include "mlibCuda.h"
#define T_PER_BLOCK 16
#define MINF __int_as_float(0xff800000)
template<class T> void CUDAImageUtil::copy(T* d_output, T* d_input, unsigned int width, unsigned int height) {
MLIB_CUDA_SAFE_CALL(hipMemcpy(d_output, d_input, sizeof(T)*width*height, hipMemcpyDeviceToDevice));
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Resample Float Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
inline __device__ float bilinearInterpolationFloat(float x, float y, const float* d_input, unsigned int imageWidth, unsigned int imageHeight)
{
const int2 p00 = make_int2(floor(x), floor(y));
const int2 p01 = p00 + make_int2(0, 1);
const int2 p10 = p00 + make_int2(1, 0);
const int2 p11 = p00 + make_int2(1, 1);
const float alpha = x - p00.x;
const float beta = y - p00.y;
float s0 = 0.0f; float w0 = 0.0f;
if (p00.x < imageWidth && p00.y < imageHeight) { float v00 = d_input[p00.y*imageWidth + p00.x]; if (v00 != MINF) { s0 += (1.0f - alpha)*v00; w0 += (1.0f - alpha); } }
if (p10.x < imageWidth && p10.y < imageHeight) { float v10 = d_input[p10.y*imageWidth + p10.x]; if (v10 != MINF) { s0 += alpha *v10; w0 += alpha; } }
float s1 = 0.0f; float w1 = 0.0f;
if (p01.x < imageWidth && p01.y < imageHeight) { float v01 = d_input[p01.y*imageWidth + p01.x]; if (v01 != MINF) { s1 += (1.0f - alpha)*v01; w1 += (1.0f - alpha); } }
if (p11.x < imageWidth && p11.y < imageHeight) { float v11 = d_input[p11.y*imageWidth + p11.x]; if (v11 != MINF) { s1 += alpha *v11; w1 += alpha; } }
const float p0 = s0 / w0;
const float p1 = s1 / w1;
float ss = 0.0f; float ww = 0.0f;
if (w0 > 0.0f) { ss += (1.0f - beta)*p0; ww += (1.0f - beta); }
if (w1 > 0.0f) { ss += beta *p1; ww += beta; }
if (ww > 0.0f) return ss / ww;
else return MINF;
}
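// Worked example of the MINF-aware weighting above (illustrative values): sampling at
// (x,y) = (10.5, 20.0) when the neighbour at (11,20) is MINF gives
//   row 20: s0 = 0.5*v(10,20),                w0 = 0.5  ->  p0 = v(10,20)
//   row 21: s1 = 0.5*v(10,21) + 0.5*v(11,21), w1 = 1.0  ->  p1 = their average
//   result  = (1-beta)*p0 + beta*p1 with beta = 0, i.e. v(10,20)
// invalid neighbours simply drop out and the remaining weights renormalise.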
//template<class T>
//__global__ void resample_Kernel(T* d_output, T* d_input, unsigned int inputWidth, unsigned int inputHeight, unsigned int outputWidth, unsigned int outputHeight)
//{
// const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
// const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
//
// if (x < outputWidth && y < outputHeight)
// {
// const float scaleWidth = (float)(inputWidth - 1) / (float)(outputWidth - 1);
// const float scaleHeight = (float)(inputHeight - 1) / (float)(outputHeight - 1);
//
// const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
// const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
//
// if (xInput < inputWidth && yInput < inputHeight)
// {
// if (std::is_same<T, float>::value) {
// d_output[y*outputWidth + x] = (T)bilinearInterpolationFloat(x*scaleWidth, y*scaleHeight, (float*)d_input, inputWidth, inputHeight);
// }
// else if (std::is_same<T, uchar4>::value) {
// d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
// }
// else {
// //static_assert(false, "bla");
// }
// }
// }
//}
//
//template<class T> void CUDAImageUtil::resample(T* d_output, unsigned int outputWidth, unsigned int outputHeight, T* d_input, unsigned int inputWidth, unsigned int inputHeight) {
//
// const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
// const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
//
// resample_Kernel << <gridSize, blockSize >> >(d_output, d_input, inputWidth, inputHeight, outputWidth, outputHeight);
//
//#ifdef _DEBUG
// MLIB_CUDA_SAFE_CALL(hipDeviceSynchronize());
// MLIB_CUDA_CHECK_ERR(__FUNCTION__);
//#endif
//}
__global__ void resampleFloat_Kernel(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const float* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
//d_output[y*outputWidth + x] = bilinearInterpolationFloat(x*scaleWidth, y*scaleHeight, d_input, inputWidth, inputHeight);
}
}
}
void CUDAImageUtil::resampleFloat(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const float* d_input, unsigned int inputWidth, unsigned int inputHeight) {
const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
resampleFloat_Kernel << <gridSize, blockSize >> >(d_output, outputWidth, outputHeight, d_input, inputWidth, inputHeight);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(hipDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
__global__ void resampleFloat4_Kernel(float4* d_output, unsigned int outputWidth, unsigned int outputHeight, const float4* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
//d_output[y*outputWidth + x] = bilinearInterpolationFloat(x*scaleWidth, y*scaleHeight, d_input, inputWidth, inputHeight);
}
}
}
void CUDAImageUtil::resampleFloat4(float4* d_output, unsigned int outputWidth, unsigned int outputHeight, const float4* d_input, unsigned int inputWidth, unsigned int inputHeight) {
const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
resampleFloat4_Kernel << <gridSize, blockSize >> >(d_output, outputWidth, outputHeight, d_input, inputWidth, inputHeight);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(hipDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
__global__ void resampleUCHAR4_Kernel(uchar4* d_output, unsigned int outputWidth, unsigned int outputHeight, const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
}
}
}
void CUDAImageUtil::resampleUCHAR4(uchar4* d_output, unsigned int outputWidth, unsigned int outputHeight, const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight) {
const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
resampleUCHAR4_Kernel << <gridSize, blockSize >> >(d_output, outputWidth, outputHeight, d_input, inputWidth, inputHeight);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(hipDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Color to Intensity
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__host__ __device__
float convertToIntensity(const uchar4& c) {
return (0.299f*c.x + 0.587f*c.y + 0.114f*c.z) / 255.0f;
}
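// This is the Rec.601 luma weighting normalised to [0,1]; e.g. pure red (255,0,0)
// maps to 0.299 and pure white (255,255,255) to 1.0.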
__global__ void convertUCHAR4ToIntensityFloat_Kernel(float* d_output, const uchar4* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
d_output[y*width + x] = convertToIntensity(d_input[y*width + x]);
}
}
void CUDAImageUtil::convertUCHAR4ToIntensityFloat(float* d_output, const uchar4* d_input, unsigned int width, unsigned int height) {
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
convertUCHAR4ToIntensityFloat_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(hipDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
__global__ void resampleToIntensity_Kernel(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = convertToIntensity(d_input[yInput*inputWidth + xInput]);
}
}
}
void CUDAImageUtil::resampleToIntensity(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight) {
const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
resampleToIntensity_Kernel << <gridSize, blockSize >> >(d_output, outputWidth, outputHeight, d_input, inputWidth, inputHeight);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(hipDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// derivatives
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void computeIntensityDerivatives_Kernel(float2* d_output, const float* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height)
{
d_output[y*width + x] = make_float2(MINF, MINF);
//derivative
if (x > 0 && x < width - 1 && y > 0 && y < height - 1)
{
float pos00 = d_input[(y - 1)*width + (x - 1)]; if (pos00 == MINF) return;
float pos01 = d_input[(y - 0)*width + (x - 1)]; if (pos01 == MINF) return;
float pos02 = d_input[(y + 1)*width + (x - 1)]; if (pos02 == MINF) return;
float pos10 = d_input[(y - 1)*width + (x - 0)]; if (pos10 == MINF) return;
//float pos11 = d_input[(y-0)*width + (x-0)]; if (pos11 == MINF) return;
float pos12 = d_input[(y + 1)*width + (x - 0)]; if (pos12 == MINF) return;
float pos20 = d_input[(y - 1)*width + (x + 1)]; if (pos20 == MINF) return;
float pos21 = d_input[(y - 0)*width + (x + 1)]; if (pos21 == MINF) return;
float pos22 = d_input[(y + 1)*width + (x + 1)]; if (pos22 == MINF) return;
float resU = (-1.0f)*pos00 + (1.0f)*pos20 +
(-2.0f)*pos01 + (2.0f)*pos21 +
(-1.0f)*pos02 + (1.0f)*pos22;
resU /= 8.0f;
float resV = (-1.0f)*pos00 + (-2.0f)*pos10 + (-1.0f)*pos20 +
(1.0f)*pos02 + (2.0f)*pos12 + (1.0f)*pos22;
resV /= 8.0f;
d_output[y*width + x] = make_float2(resU, resV);
}
}
}
void CUDAImageUtil::computeIntensityDerivatives(float2* d_output, const float* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
computeIntensityDerivatives_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(hipDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
__global__ void computeIntensityGradientMagnitude_Kernel(float* d_output, const float* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height)
{
d_output[y*width + x] = MINF;
//derivative
if (x > 0 && x < width - 1 && y > 0 && y < height - 1)
{
float pos00 = d_input[(y - 1)*width + (x - 1)]; if (pos00 == MINF) return;
float pos01 = d_input[(y - 0)*width + (x - 1)]; if (pos01 == MINF) return;
float pos02 = d_input[(y + 1)*width + (x - 1)]; if (pos02 == MINF) return;
float pos10 = d_input[(y - 1)*width + (x - 0)]; if (pos10 == MINF) return;
//float pos11 = d_input[(y-0)*width + (x-0)]; if (pos11 == MINF) return;
float pos12 = d_input[(y + 1)*width + (x - 0)]; if (pos12 == MINF) return;
float pos20 = d_input[(y - 1)*width + (x + 1)]; if (pos20 == MINF) return;
float pos21 = d_input[(y - 0)*width + (x + 1)]; if (pos21 == MINF) return;
float pos22 = d_input[(y + 1)*width + (x + 1)]; if (pos22 == MINF) return;
float resU = (-1.0f)*pos00 + (1.0f)*pos20 +
(-2.0f)*pos01 + (2.0f)*pos21 +
(-1.0f)*pos02 + (1.0f)*pos22;
//resU /= 8.0f;
float resV = (-1.0f)*pos00 + (-2.0f)*pos10 + (-1.0f)*pos20 +
(1.0f)*pos02 + (2.0f)*pos12 + (1.0f)*pos22;
//resV /= 8.0f;
d_output[y*width + x] = sqrt(resU * resU + resV * resV);
}
}
}
void CUDAImageUtil::computeIntensityGradientMagnitude(float* d_output, const float* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
computeIntensityGradientMagnitude_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(hipDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convert Depth to Camera Space Positions
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void convertDepthFloatToCameraSpaceFloat4_Kernel(float4* d_output, const float* d_input, float4x4 intrinsicsInv, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
d_output[y*width + x] = make_float4(MINF, MINF, MINF, MINF);
float depth = d_input[y*width + x];
if (depth != MINF)
{
float4 cameraSpace(intrinsicsInv*make_float4((float)x*depth, (float)y*depth, depth, depth));
d_output[y*width + x] = make_float4(cameraSpace.x, cameraSpace.y, cameraSpace.w, 1.0f);
//d_output[y*width + x] = make_float4(depthCameraData.kinectDepthToSkeleton(x, y, depth), 1.0f);
}
}
}
void CUDAImageUtil::convertDepthFloatToCameraSpaceFloat4(float4* d_output, const float* d_input, const float4x4& intrinsicsInv, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
convertDepthFloatToCameraSpaceFloat4_Kernel << <gridSize, blockSize >> >(d_output, d_input, intrinsicsInv, width, height);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(hipDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
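// Assuming a standard pinhole intrinsics matrix K = [[fx,0,cx],[0,fy,cy],[0,0,1]]
// embedded in the 4x4 intrinsicsInv, the kernel above is the usual back-projection
//   X = (x-cx)*depth/fx,  Y = (y-cy)*depth/fy,  Z = depth
// evaluated as intrinsicsInv * (x*depth, y*depth, depth, depth).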
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Compute Normal Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void computeNormals_Kernel(float4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
d_output[y*width + x] = make_float4(MINF, MINF, MINF, MINF);
if (x > 0 && x < width - 1 && y > 0 && y < height - 1)
{
const float4 CC = d_input[(y + 0)*width + (x + 0)];
const float4 PC = d_input[(y + 1)*width + (x + 0)];
const float4 CP = d_input[(y + 0)*width + (x + 1)];
const float4 MC = d_input[(y - 1)*width + (x + 0)];
const float4 CM = d_input[(y + 0)*width + (x - 1)];
if (CC.x != MINF && PC.x != MINF && CP.x != MINF && MC.x != MINF && CM.x != MINF)
{
const float3 n = cross(make_float3(PC) - make_float3(MC), make_float3(CP) - make_float3(CM));
const float l = length(n);
if (l > 0.0f)
{
d_output[y*width + x] = make_float4(n / -l, 0.0f);
}
}
}
}
void CUDAImageUtil::computeNormals(float4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
computeNormals_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void computeNormalsSobel_Kernel(float4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
d_output[y*width + x] = make_float4(MINF, MINF, MINF, MINF);
if (x > 0 && x < width - 1 && y > 0 && y < height - 1)
{
float4 pos00 = d_input[(y - 1)*width + (x - 1)]; if (pos00.x == MINF) return;
float4 pos01 = d_input[(y - 0)*width + (x - 1)]; if (pos01.x == MINF) return;
float4 pos02 = d_input[(y + 1)*width + (x - 1)]; if (pos02.x == MINF) return;
float4 pos10 = d_input[(y - 1)*width + (x - 0)]; if (pos10.x == MINF) return;
//float4 pos11 = d_input[(y-0)*width + (x-0)]; if (pos11.x == MINF) return;
float4 pos12 = d_input[(y + 1)*width + (x - 0)]; if (pos12.x == MINF) return;
float4 pos20 = d_input[(y - 1)*width + (x + 1)]; if (pos20.x == MINF) return;
float4 pos21 = d_input[(y - 0)*width + (x + 1)]; if (pos21.x == MINF) return;
float4 pos22 = d_input[(y + 1)*width + (x + 1)]; if (pos22.x == MINF) return;
float4 resU = (-1.0f)*pos00 + (1.0f)*pos20 +
(-2.0f)*pos01 + (2.0f)*pos21 +
(-1.0f)*pos02 + (1.0f)*pos22;
float4 resV = (-1.0f)*pos00 + (-2.0f)*pos10 + (-1.0f)*pos20 +
(1.0f)*pos02 + (2.0f)*pos12 + (1.0f)*pos22;
const float3 n = cross(make_float3(resU.x, resU.y, resU.z), make_float3(resV.x, resV.y, resV.z));
const float l = length(n);
if (l > 0.0f) d_output[y*width + x] = make_float4(n / l, 0.0f);
}
}
void CUDAImageUtil::computeNormalsSobel(float4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
computeNormalsSobel_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void convertNormalsFloat4ToUCHAR4_Kernel(uchar4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
d_output[y*width + x] = make_uchar4(0, 0, 0, 0);
float4 p = d_input[y*width + x];
if (p.x != MINF)
{
p = (p + 1.0f) / 2.0f; // -> [0, 1]
d_output[y*width + x] = make_uchar4((uchar)round(p.x * 255), (uchar)round(p.y * 255), (uchar)round(p.z * 255), 0);
}
}
}
void CUDAImageUtil::convertNormalsFloat4ToUCHAR4(uchar4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
convertNormalsFloat4ToUCHAR4_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Joint Bilateral Filter
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
inline __device__ float gaussD(float sigma, int x, int y)
{
return exp(-((x*x + y*y) / (2.0f*sigma*sigma)));
}
inline __device__ float gaussR(float sigma, float dist)
{
return exp(-(dist*dist) / (2.0*sigma*sigma));
}
__global__ void bilateralFilterUCHAR4_Kernel(uchar4* d_output, uchar4* d_color, float* d_depth, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int kernelRadius = (int)ceil(2.0*sigmaD);
d_output[y*width + x] = d_color[y*width + x];
float3 sum = make_float3(0.0f, 0.0f, 0.0f);
float sumWeight = 0.0f;
const float depthCenter = d_depth[y*width + x];
if (depthCenter != MINF)
{
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const uchar4 cur = d_color[n*width + m];
const float currentDepth = d_depth[n*width + m];
if (currentDepth != MINF) {
const float weight = gaussD(sigmaD, m - x, n - y)*gaussR(sigmaR, currentDepth - depthCenter);
sumWeight += weight;
sum += weight*make_float3(cur.x, cur.y, cur.z);
}
}
}
}
if (sumWeight > 0.0f) {
float3 res = sum / sumWeight;
d_output[y*width + x] = make_uchar4((uchar)res.x, (uchar)res.y, (uchar)res.z, 255);
}
}
}
void CUDAImageUtil::jointBilateralFilterColorUCHAR4(uchar4* d_output, uchar4* d_input, float* d_depth, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
bilateralFilterUCHAR4_Kernel << <gridSize, blockSize >> >(d_output, d_input, d_depth, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void bilateralFilterFloat_Kernel(float* d_output, float* d_input, float* d_depth, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int kernelRadius = (int)ceil(2.0*sigmaD);
d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
const float depthCenter = d_depth[y*width + x];
if (depthCenter != MINF)
{
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float cur = d_input[n*width + m];
const float currentDepth = d_depth[n*width + m];
if (currentDepth != MINF && fabs(depthCenter - currentDepth) < sigmaR)
{ //const float weight = gaussD(sigmaD, m - x, n - y)*gaussR(sigmaR, currentDepth - depthCenter);
const float weight = gaussD(sigmaD, m - x, n - y);
sumWeight += weight;
sum += weight*cur;
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
}
void CUDAImageUtil::jointBilateralFilterFloat(float* d_output, float* d_input, float* d_depth, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
bilateralFilterFloat_Kernel << <gridSize, blockSize >> >(d_output, d_input, d_depth, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void adaptiveBilateralFilterIntensity_Kernel(float* d_output, const float* d_input, const float* d_depth, float sigmaD, float sigmaR, float adaptFactor, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
const float depthCenter = d_depth[y*width + x];
if (depthCenter != MINF)
{
const float curSigma = sigmaD * adaptFactor / depthCenter;
const int kernelRadius = (int)ceil(2.0*curSigma);
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float cur = d_input[n*width + m];
const float currentDepth = d_depth[n*width + m];
if (currentDepth != MINF && fabs(depthCenter - currentDepth) < sigmaR)
{ //const float weight = gaussD(curSigma, m - x, n - y)*gaussR(sigmaR, currentDepth - depthCenter);
const float weight = gaussD(curSigma, m - x, n - y);
sumWeight += weight;
sum += weight*cur;
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
}
void CUDAImageUtil::adaptiveBilateralFilterIntensity(float* d_output, const float* d_input, const float* d_depth, float sigmaD, float sigmaR, float adaptFactor, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
adaptiveBilateralFilterIntensity_Kernel << <gridSize, blockSize >> >(d_output, d_input, d_depth, sigmaD, sigmaR, adaptFactor, width, height);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Erode Depth Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void erodeDepthMapDevice(float* d_output, float* d_input, int structureSize, int width, int height, float dThresh, float fracReq)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= 0 && x < width && y >= 0 && y < height)
{
unsigned int count = 0;
float oldDepth = d_input[y*width + x];
for (int i = -structureSize; i <= structureSize; i++)
{
for (int j = -structureSize; j <= structureSize; j++)
{
if (x + j >= 0 && x + j < width && y + i >= 0 && y + i < height)
{
float depth = d_input[(y + i)*width + (x + j)];
if (depth == MINF || depth == 0.0f || fabs(depth - oldDepth) > dThresh)
{
count++;
//d_output[y*width+x] = MINF;
//return;
}
}
}
}
unsigned int sum = (2 * structureSize + 1)*(2 * structureSize + 1);
if ((float)count / (float)sum >= fracReq) {
d_output[y*width + x] = MINF;
}
else {
d_output[y*width + x] = d_input[y*width + x];
}
}
}
void CUDAImageUtil::erodeDepthMap(float* d_output, float* d_input, int structureSize, unsigned int width, unsigned int height, float dThresh, float fracReq)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
erodeDepthMapDevice << <gridSize, blockSize >> >(d_output, d_input, structureSize, width, height, dThresh, fracReq);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Gauss Filter Float Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void gaussFilterDepthMapDevice(float* d_output, const float* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int kernelRadius = (int)ceil(2.0*sigmaD);
d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
const float depthCenter = d_input[y*width + x];
if (depthCenter != MINF)
{
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float currentDepth = d_input[n*width + m];
if (currentDepth != MINF && fabs(depthCenter - currentDepth) < sigmaR)
{
const float weight = gaussD(sigmaD, m - x, n - y);
sumWeight += weight;
sum += weight*currentDepth;
}
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
void CUDAImageUtil::gaussFilterDepthMap(float* d_output, const float* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
gaussFilterDepthMapDevice << <gridSize, blockSize >> >(d_output, d_input, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void gaussFilterIntensityDevice(float* d_output, const float* d_input, float sigmaD, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int kernelRadius = (int)ceil(2.0*sigmaD);
//d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
//const float center = d_input[y*width + x];
//if (center != MINF) {
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float current = d_input[n*width + m];
//if (current != MINF && fabs(center - current) < sigmaR) {
const float weight = gaussD(sigmaD, m - x, n - y);
sumWeight += weight;
sum += weight*current;
//}
}
}
}
//}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
void CUDAImageUtil::gaussFilterIntensity(float* d_output, const float* d_input, float sigmaD, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
gaussFilterIntensityDevice << <gridSize, blockSize >> >(d_output, d_input, sigmaD, width, height);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// adaptive gauss filter float map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void adaptiveGaussFilterDepthMap_Kernel(float* d_output, const float* d_input, float sigmaD, float sigmaR,
unsigned int width, unsigned int height, float adaptFactor)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
const float depthCenter = d_input[y*width + x];
if (depthCenter != MINF)
{
const float curSigma = sigmaD / depthCenter * adaptFactor;
const int kernelRadius = (int)ceil(2.0*curSigma);
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float currentDepth = d_input[n*width + m];
if (currentDepth != MINF && fabs(depthCenter - currentDepth) < sigmaR)
{
const float weight = gaussD(curSigma, m - x, n - y);
sumWeight += weight;
sum += weight*currentDepth;
}
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
void CUDAImageUtil::adaptiveGaussFilterDepthMap(float* d_output, const float* d_input, float sigmaD, float sigmaR, float adaptFactor, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
adaptiveGaussFilterDepthMap_Kernel << <gridSize, blockSize >> >(d_output, d_input, sigmaD, sigmaR, width, height, adaptFactor);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void adaptiveGaussFilterIntensity_Kernel(float* d_output, const float* d_input, const float* d_depth, float sigmaD,
unsigned int width, unsigned int height, float adaptFactor)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
float sum = 0.0f;
float sumWeight = 0.0f;
d_output[y*width + x] = MINF; //(should not be used in the case of no valid depth)
const float depthCenter = d_depth[y*width + x];
if (depthCenter != MINF)
{
const float curSigma = sigmaD / depthCenter * adaptFactor;
const int kernelRadius = (int)ceil(2.0*curSigma);
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float currentDepth = d_depth[n*width + m];
if (currentDepth != MINF) // && fabs(depthCenter - currentDepth) < sigmaR)
{
const float current = d_input[n*width + m];
const float weight = gaussD(curSigma, m - x, n - y);
sumWeight += weight;
sum += weight*current;
}
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
void CUDAImageUtil::adaptiveGaussFilterIntensity(float* d_output, const float* d_input, const float* d_depth, float sigmaD, float adaptFactor, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
adaptiveGaussFilterIntensity_Kernel << <gridSize, blockSize >> >(d_output, d_input, d_depth, sigmaD, width, height, adaptFactor);
#ifdef _DEBUG
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
|
2a336c14a95b3e9b23510d214719acea780f86ae.cu
|
#include "CUDAImageUtil.h"
#include "mlibCuda.h"
#define T_PER_BLOCK 16
#define MINF __int_as_float(0xff800000)
template<class T> void CUDAImageUtil::copy(T* d_output, T* d_input, unsigned int width, unsigned int height) {
MLIB_CUDA_SAFE_CALL(cudaMemcpy(d_output, d_input, sizeof(T)*width*height, cudaMemcpyDeviceToDevice));
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Resample Float Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
inline __device__ float bilinearInterpolationFloat(float x, float y, const float* d_input, unsigned int imageWidth, unsigned int imageHeight)
{
const int2 p00 = make_int2(floor(x), floor(y));
const int2 p01 = p00 + make_int2(0, 1);
const int2 p10 = p00 + make_int2(1, 0);
const int2 p11 = p00 + make_int2(1, 1);
const float alpha = x - p00.x;
const float beta = y - p00.y;
float s0 = 0.0f; float w0 = 0.0f;
if (p00.x < imageWidth && p00.y < imageHeight) { float v00 = d_input[p00.y*imageWidth + p00.x]; if (v00 != MINF) { s0 += (1.0f - alpha)*v00; w0 += (1.0f - alpha); } }
if (p10.x < imageWidth && p10.y < imageHeight) { float v10 = d_input[p10.y*imageWidth + p10.x]; if (v10 != MINF) { s0 += alpha *v10; w0 += alpha; } }
float s1 = 0.0f; float w1 = 0.0f;
if (p01.x < imageWidth && p01.y < imageHeight) { float v01 = d_input[p01.y*imageWidth + p01.x]; if (v01 != MINF) { s1 += (1.0f - alpha)*v01; w1 += (1.0f - alpha); } }
if (p11.x < imageWidth && p11.y < imageHeight) { float v11 = d_input[p11.y*imageWidth + p11.x]; if (v11 != MINF) { s1 += alpha *v11; w1 += alpha; } }
const float p0 = s0 / w0;
const float p1 = s1 / w1;
float ss = 0.0f; float ww = 0.0f;
if (w0 > 0.0f) { ss += (1.0f - beta)*p0; ww += (1.0f - beta); }
if (w1 > 0.0f) { ss += beta *p1; ww += beta; }
if (ww > 0.0f) return ss / ww;
else return MINF;
}
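// bilinearInterpolationFloat blends only valid (non-MINF) neighbors and renormalizes by
// the accumulated weights, so isolated invalid samples do not poison the result. It is
// unused in this file: resampleFloat_Kernel below picks the nearest sample and keeps the
// bilinear call commented out; re-enabling it is a drop-in change, e.g.
//   d_output[y*outputWidth + x] = bilinearInterpolationFloat(x*scaleWidth, y*scaleHeight, d_input, inputWidth, inputHeight);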
//template<class T>
//__global__ void resample_Kernel(T* d_output, T* d_input, unsigned int inputWidth, unsigned int inputHeight, unsigned int outputWidth, unsigned int outputHeight)
//{
// const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
// const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
//
// if (x < outputWidth && y < outputHeight)
// {
// const float scaleWidth = (float)(inputWidth - 1) / (float)(outputWidth - 1);
// const float scaleHeight = (float)(inputHeight - 1) / (float)(outputHeight - 1);
//
// const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
// const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
//
// if (xInput < inputWidth && yInput < inputHeight)
// {
// if (std::is_same<T, float>::value) {
// d_output[y*outputWidth + x] = (T)bilinearInterpolationFloat(x*scaleWidth, y*scaleHeight, (float*)d_input, inputWidth, inputHeight);
// }
// else if (std::is_same<T, uchar4>::value) {
// d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
// }
// else {
// //static_assert(false, "bla");
// }
// }
// }
//}
//
//template<class T> void CUDAImageUtil::resample(T* d_output, unsigned int outputWidth, unsigned int outputHeight, T* d_input, unsigned int inputWidth, unsigned int inputHeight) {
//
// const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
// const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
//
// resample_Kernel << <gridSize, blockSize >> >(d_output, d_input, inputWidth, inputHeight, outputWidth, outputHeight);
//
//#ifdef _DEBUG
// MLIB_CUDA_SAFE_CALL(cudaDeviceSynchronize());
// MLIB_CUDA_CHECK_ERR(__FUNCTION__);
//#endif
//}
__global__ void resampleFloat_Kernel(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const float* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
//d_output[y*outputWidth + x] = bilinearInterpolationFloat(x*scaleWidth, y*scaleHeight, d_input, inputWidth, inputHeight);
}
}
}
void CUDAImageUtil::resampleFloat(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const float* d_input, unsigned int inputWidth, unsigned int inputHeight) {
const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
resampleFloat_Kernel << <gridSize, blockSize >> >(d_output, outputWidth, outputHeight, d_input, inputWidth, inputHeight);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(cudaDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
__global__ void resampleFloat4_Kernel(float4* d_output, unsigned int outputWidth, unsigned int outputHeight, const float4* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
//d_output[y*outputWidth + x] = bilinearInterpolationFloat(x*scaleWidth, y*scaleHeight, d_input, inputWidth, inputHeight);
}
}
}
void CUDAImageUtil::resampleFloat4(float4* d_output, unsigned int outputWidth, unsigned int outputHeight, const float4* d_input, unsigned int inputWidth, unsigned int inputHeight) {
const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
resampleFloat4_Kernel << <gridSize, blockSize >> >(d_output, outputWidth, outputHeight, d_input, inputWidth, inputHeight);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(cudaDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
__global__ void resampleUCHAR4_Kernel(uchar4* d_output, unsigned int outputWidth, unsigned int outputHeight, const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = d_input[yInput*inputWidth + xInput];
}
}
}
void CUDAImageUtil::resampleUCHAR4(uchar4* d_output, unsigned int outputWidth, unsigned int outputHeight, const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight) {
const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
resampleUCHAR4_Kernel << <gridSize, blockSize >> >(d_output, outputWidth, outputHeight, d_input, inputWidth, inputHeight);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(cudaDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Color to Intensity
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__host__ __device__
float convertToIntensity(const uchar4& c) {
return (0.299f*c.x + 0.587f*c.y + 0.114f*c.z) / 255.0f;
}
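// The weights above are the Rec. 601 luma coefficients (0.299, 0.587, 0.114); dividing by
// 255 maps uchar RGB to an intensity in [0, 1].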
__global__ void convertUCHAR4ToIntensityFloat_Kernel(float* d_output, const uchar4* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
d_output[y*width + x] = convertToIntensity(d_input[y*width + x]);
}
}
void CUDAImageUtil::convertUCHAR4ToIntensityFloat(float* d_output, const uchar4* d_input, unsigned int width, unsigned int height) {
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
convertUCHAR4ToIntensityFloat_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(cudaDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
__global__ void resampleToIntensity_Kernel(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < outputWidth && y < outputHeight)
{
const float scaleWidth = (float)(inputWidth-1) / (float)(outputWidth-1);
const float scaleHeight = (float)(inputHeight-1) / (float)(outputHeight-1);
const unsigned int xInput = (unsigned int)(x*scaleWidth + 0.5f);
const unsigned int yInput = (unsigned int)(y*scaleHeight + 0.5f);
if (xInput < inputWidth && yInput < inputHeight) {
d_output[y*outputWidth + x] = convertToIntensity(d_input[yInput*inputWidth + xInput]);
}
}
}
void CUDAImageUtil::resampleToIntensity(float* d_output, unsigned int outputWidth, unsigned int outputHeight, const uchar4* d_input, unsigned int inputWidth, unsigned int inputHeight) {
const dim3 gridSize((outputWidth + T_PER_BLOCK - 1) / T_PER_BLOCK, (outputHeight + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
resampleToIntensity_Kernel << <gridSize, blockSize >> >(d_output, outputWidth, outputHeight, d_input, inputWidth, inputHeight);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(cudaDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// derivatives
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void computeIntensityDerivatives_Kernel(float2* d_output, const float* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height)
{
d_output[y*width + x] = make_float2(MINF, MINF);
//derivative
if (x > 0 && x < width - 1 && y > 0 && y < height - 1)
{
float pos00 = d_input[(y - 1)*width + (x - 1)]; if (pos00 == MINF) return;
float pos01 = d_input[(y - 0)*width + (x - 1)]; if (pos01 == MINF) return;
float pos02 = d_input[(y + 1)*width + (x - 1)]; if (pos02 == MINF) return;
float pos10 = d_input[(y - 1)*width + (x - 0)]; if (pos10 == MINF) return;
//float pos11 = d_input[(y-0)*width + (x-0)]; if (pos11 == MINF) return;
float pos12 = d_input[(y + 1)*width + (x - 0)]; if (pos12 == MINF) return;
float pos20 = d_input[(y - 1)*width + (x + 1)]; if (pos20 == MINF) return;
float pos21 = d_input[(y - 0)*width + (x + 1)]; if (pos21 == MINF) return;
float pos22 = d_input[(y + 1)*width + (x + 1)]; if (pos22 == MINF) return;
float resU = (-1.0f)*pos00 + (1.0f)*pos20 +
(-2.0f)*pos01 + (2.0f)*pos21 +
(-1.0f)*pos02 + (1.0f)*pos22;
resU /= 8.0f;
float resV = (-1.0f)*pos00 + (-2.0f)*pos10 + (-1.0f)*pos20 +
(1.0f)*pos02 + (2.0f)*pos12 + (1.0f)*pos22;
resV /= 8.0f;
d_output[y*width + x] = make_float2(resU, resV);
}
}
}
void CUDAImageUtil::computeIntensityDerivatives(float2* d_output, const float* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
computeIntensityDerivatives_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(cudaDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
__global__ void computeIntensityGradientMagnitude_Kernel(float* d_output, const float* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height)
{
d_output[y*width + x] = MINF;
//derivative
if (x > 0 && x < width - 1 && y > 0 && y < height - 1)
{
float pos00 = d_input[(y - 1)*width + (x - 1)]; if (pos00 == MINF) return;
float pos01 = d_input[(y - 0)*width + (x - 1)]; if (pos01 == MINF) return;
float pos02 = d_input[(y + 1)*width + (x - 1)]; if (pos02 == MINF) return;
float pos10 = d_input[(y - 1)*width + (x - 0)]; if (pos10 == MINF) return;
//float pos11 = d_input[(y-0)*width + (x-0)]; if (pos11 == MINF) return;
float pos12 = d_input[(y + 1)*width + (x - 0)]; if (pos12 == MINF) return;
float pos20 = d_input[(y - 1)*width + (x + 1)]; if (pos20 == MINF) return;
float pos21 = d_input[(y - 0)*width + (x + 1)]; if (pos21 == MINF) return;
float pos22 = d_input[(y + 1)*width + (x + 1)]; if (pos22 == MINF) return;
float resU = (-1.0f)*pos00 + (1.0f)*pos20 +
(-2.0f)*pos01 + (2.0f)*pos21 +
(-1.0f)*pos02 + (1.0f)*pos22;
//resU /= 8.0f;
float resV = (-1.0f)*pos00 + (-2.0f)*pos10 + (-1.0f)*pos20 +
(1.0f)*pos02 + (2.0f)*pos12 + (1.0f)*pos22;
//resV /= 8.0f;
d_output[y*width + x] = sqrt(resU * resU + resV * resV);
}
}
}
void CUDAImageUtil::computeIntensityGradientMagnitude(float* d_output, const float* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
computeIntensityGradientMagnitude_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(cudaDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convert Depth to Camera Space Positions
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void convertDepthFloatToCameraSpaceFloat4_Kernel(float4* d_output, const float* d_input, float4x4 intrinsicsInv, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
d_output[y*width + x] = make_float4(MINF, MINF, MINF, MINF);
float depth = d_input[y*width + x];
if (depth != MINF)
{
float4 cameraSpace(intrinsicsInv*make_float4((float)x*depth, (float)y*depth, depth, depth));
d_output[y*width + x] = make_float4(cameraSpace.x, cameraSpace.y, cameraSpace.w, 1.0f);
//d_output[y*width + x] = make_float4(depthCameraData.kinectDepthToSkeleton(x, y, depth), 1.0f);
}
}
}
void CUDAImageUtil::convertDepthFloatToCameraSpaceFloat4(float4* d_output, const float* d_input, const float4x4& intrinsicsInv, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
convertDepthFloatToCameraSpaceFloat4_Kernel << <gridSize, blockSize >> >(d_output, d_input, intrinsicsInv, width, height);
#ifdef _DEBUG
MLIB_CUDA_SAFE_CALL(cudaDeviceSynchronize());
MLIB_CUDA_CHECK_ERR(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Compute Normal Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void computeNormals_Kernel(float4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
d_output[y*width + x] = make_float4(MINF, MINF, MINF, MINF);
if (x > 0 && x < width - 1 && y > 0 && y < height - 1)
{
const float4 CC = d_input[(y + 0)*width + (x + 0)];
const float4 PC = d_input[(y + 1)*width + (x + 0)];
const float4 CP = d_input[(y + 0)*width + (x + 1)];
const float4 MC = d_input[(y - 1)*width + (x + 0)];
const float4 CM = d_input[(y + 0)*width + (x - 1)];
if (CC.x != MINF && PC.x != MINF && CP.x != MINF && MC.x != MINF && CM.x != MINF)
{
const float3 n = cross(make_float3(PC) - make_float3(MC), make_float3(CP) - make_float3(CM));
const float l = length(n);
if (l > 0.0f)
{
d_output[y*width + x] = make_float4(n / -l, 0.0f);
}
}
}
}
void CUDAImageUtil::computeNormals(float4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
computeNormals_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void computeNormalsSobel_Kernel(float4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
d_output[y*width + x] = make_float4(MINF, MINF, MINF, MINF);
if (x > 0 && x < width - 1 && y > 0 && y < height - 1)
{
float4 pos00 = d_input[(y - 1)*width + (x - 1)]; if (pos00.x == MINF) return;
float4 pos01 = d_input[(y - 0)*width + (x - 1)]; if (pos01.x == MINF) return;
float4 pos02 = d_input[(y + 1)*width + (x - 1)]; if (pos02.x == MINF) return;
float4 pos10 = d_input[(y - 1)*width + (x - 0)]; if (pos10.x == MINF) return;
//float4 pos11 = d_input[(y-0)*width + (x-0)]; if (pos11.x == MINF) return;
float4 pos12 = d_input[(y + 1)*width + (x - 0)]; if (pos12.x == MINF) return;
float4 pos20 = d_input[(y - 1)*width + (x + 1)]; if (pos20.x == MINF) return;
float4 pos21 = d_input[(y - 0)*width + (x + 1)]; if (pos21.x == MINF) return;
float4 pos22 = d_input[(y + 1)*width + (x + 1)]; if (pos22.x == MINF) return;
float4 resU = (-1.0f)*pos00 + (1.0f)*pos20 +
(-2.0f)*pos01 + (2.0f)*pos21 +
(-1.0f)*pos02 + (1.0f)*pos22;
float4 resV = (-1.0f)*pos00 + (-2.0f)*pos10 + (-1.0f)*pos20 +
(1.0f)*pos02 + (2.0f)*pos12 + (1.0f)*pos22;
const float3 n = cross(make_float3(resU.x, resU.y, resU.z), make_float3(resV.x, resV.y, resV.z));
const float l = length(n);
if (l > 0.0f) d_output[y*width + x] = make_float4(n / l, 0.0f);
}
}
void CUDAImageUtil::computeNormalsSobel(float4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
computeNormalsSobel_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void convertNormalsFloat4ToUCHAR4_Kernel(uchar4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < width && y < height) {
d_output[y*width + x] = make_uchar4(0, 0, 0, 0);
float4 p = d_input[y*width + x];
if (p.x != MINF)
{
p = (p + 1.0f) / 2.0f; // -> [0, 1]
d_output[y*width + x] = make_uchar4((uchar)round(p.x * 255), (uchar)round(p.y * 255), (uchar)round(p.z * 255), 0);
}
}
}
void CUDAImageUtil::convertNormalsFloat4ToUCHAR4(uchar4* d_output, const float4* d_input, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
convertNormalsFloat4ToUCHAR4_Kernel << <gridSize, blockSize >> >(d_output, d_input, width, height);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Joint Bilateral Filter
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
inline __device__ float gaussD(float sigma, int x, int y)
{
return exp(-((x*x + y*y) / (2.0f*sigma*sigma)));
}
inline __device__ float gaussR(float sigma, float dist)
{
return exp(-(dist*dist) / (2.0*sigma*sigma));
}
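// A minimal sketch of how the two terms combine in the joint bilateral filter below
// (illustrative only; the helper name bilateralWeight is not part of the original code and
// nothing here calls it): the spatial term falls off with pixel distance to the kernel
// center, the range term with the depth difference to it.
inline __device__ float bilateralWeight(float sigmaD, float sigmaR, int dx, int dy, float depthDiff)
{
return gaussD(sigmaD, dx, dy) * gaussR(sigmaR, depthDiff);
}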
__global__ void bilateralFilterUCHAR4_Kernel(uchar4* d_output, uchar4* d_color, float* d_depth, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int kernelRadius = (int)ceil(2.0*sigmaD);
d_output[y*width + x] = d_color[y*width + x];
float3 sum = make_float3(0.0f, 0.0f, 0.0f);
float sumWeight = 0.0f;
const float depthCenter = d_depth[y*width + x];
if (depthCenter != MINF)
{
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const uchar4 cur = d_color[n*width + m];
const float currentDepth = d_depth[n*width + m];
if (currentDepth != MINF) {
const float weight = gaussD(sigmaD, m - x, n - y)*gaussR(sigmaR, currentDepth - depthCenter);
sumWeight += weight;
sum += weight*make_float3(cur.x, cur.y, cur.z);
}
}
}
}
if (sumWeight > 0.0f) {
float3 res = sum / sumWeight;
d_output[y*width + x] = make_uchar4((uchar)res.x, (uchar)res.y, (uchar)res.z, 255);
}
}
}
void CUDAImageUtil::jointBilateralFilterColorUCHAR4(uchar4* d_output, uchar4* d_input, float* d_depth, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
bilateralFilterUCHAR4_Kernel << <gridSize, blockSize >> >(d_output, d_input, d_depth, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void bilateralFilterFloat_Kernel(float* d_output, float* d_input, float* d_depth, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int kernelRadius = (int)ceil(2.0*sigmaD);
d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
const float depthCenter = d_depth[y*width + x];
if (depthCenter != MINF)
{
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float cur = d_input[n*width + m];
const float currentDepth = d_depth[n*width + m];
if (currentDepth != MINF && fabs(depthCenter - currentDepth) < sigmaR)
{ //const float weight = gaussD(sigmaD, m - x, n - y)*gaussR(sigmaR, currentDepth - depthCenter);
const float weight = gaussD(sigmaD, m - x, n - y);
sumWeight += weight;
sum += weight*cur;
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
}
void CUDAImageUtil::jointBilateralFilterFloat(float* d_output, float* d_input, float* d_depth, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
bilateralFilterFloat_Kernel << <gridSize, blockSize >> >(d_output, d_input, d_depth, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void adaptiveBilateralFilterIntensity_Kernel(float* d_output, const float* d_input, const float* d_depth, float sigmaD, float sigmaR, float adaptFactor, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
const float depthCenter = d_depth[y*width + x];
if (depthCenter != MINF)
{
const float curSigma = sigmaD * adaptFactor / depthCenter;
const int kernelRadius = (int)ceil(2.0*curSigma);
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float cur = d_input[n*width + m];
const float currentDepth = d_depth[n*width + m];
if (currentDepth != MINF && fabs(depthCenter - currentDepth) < sigmaR)
{ //const float weight = gaussD(curSigma, m - x, n - y)*gaussR(sigmaR, currentDepth - depthCenter);
const float weight = gaussD(curSigma, m - x, n - y);
sumWeight += weight;
sum += weight*cur;
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
}
void CUDAImageUtil::adaptiveBilateralFilterIntensity(float* d_output, const float* d_input, const float* d_depth, float sigmaD, float sigmaR, float adaptFactor, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
adaptiveBilateralFilterIntensity_Kernel << <gridSize, blockSize >> >(d_output, d_input, d_depth, sigmaD, sigmaR, adaptFactor, width, height);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Erode Depth Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void erodeDepthMapDevice(float* d_output, float* d_input, int structureSize, int width, int height, float dThresh, float fracReq)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= 0 && x < width && y >= 0 && y < height)
{
unsigned int count = 0;
float oldDepth = d_input[y*width + x];
for (int i = -structureSize; i <= structureSize; i++)
{
for (int j = -structureSize; j <= structureSize; j++)
{
if (x + j >= 0 && x + j < width && y + i >= 0 && y + i < height)
{
float depth = d_input[(y + i)*width + (x + j)];
if (depth == MINF || depth == 0.0f || fabs(depth - oldDepth) > dThresh)
{
count++;
//d_output[y*width+x] = MINF;
//return;
}
}
}
}
unsigned int sum = (2 * structureSize + 1)*(2 * structureSize + 1);
if ((float)count / (float)sum >= fracReq) {
d_output[y*width + x] = MINF;
}
else {
d_output[y*width + x] = d_input[y*width + x];
}
}
}
void CUDAImageUtil::erodeDepthMap(float* d_output, float* d_input, int structureSize, unsigned int width, unsigned int height, float dThresh, float fracReq)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
erodeDepthMapDevice << <gridSize, blockSize >> >(d_output, d_input, structureSize, width, height, dThresh, fracReq);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Gauss Filter Float Map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void gaussFilterDepthMapDevice(float* d_output, const float* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int kernelRadius = (int)ceil(2.0*sigmaD);
d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
const float depthCenter = d_input[y*width + x];
if (depthCenter != MINF)
{
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float currentDepth = d_input[n*width + m];
if (currentDepth != MINF && fabs(depthCenter - currentDepth) < sigmaR)
{
const float weight = gaussD(sigmaD, m - x, n - y);
sumWeight += weight;
sum += weight*currentDepth;
}
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
void CUDAImageUtil::gaussFilterDepthMap(float* d_output, const float* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
gaussFilterDepthMapDevice << <gridSize, blockSize >> >(d_output, d_input, sigmaD, sigmaR, width, height);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void gaussFilterIntensityDevice(float* d_output, const float* d_input, float sigmaD, unsigned int width, unsigned int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const int kernelRadius = (int)ceil(2.0*sigmaD);
//d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
//const float center = d_input[y*width + x];
//if (center != MINF) {
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float current = d_input[n*width + m];
//if (current != MINF && fabs(center - current) < sigmaR) {
const float weight = gaussD(sigmaD, m - x, n - y);
sumWeight += weight;
sum += weight*current;
//}
}
}
}
//}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
void CUDAImageUtil::gaussFilterIntensity(float* d_output, const float* d_input, float sigmaD, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
gaussFilterIntensityDevice << <gridSize, blockSize >> >(d_output, d_input, sigmaD, width, height);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// adaptive gauss filter float map
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void adaptiveGaussFilterDepthMap_Kernel(float* d_output, const float* d_input, float sigmaD, float sigmaR,
unsigned int width, unsigned int height, float adaptFactor)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
d_output[y*width + x] = MINF;
float sum = 0.0f;
float sumWeight = 0.0f;
const float depthCenter = d_input[y*width + x];
if (depthCenter != MINF)
{
const float curSigma = sigmaD / depthCenter * adaptFactor;
const int kernelRadius = (int)ceil(2.0*curSigma);
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float currentDepth = d_input[n*width + m];
if (currentDepth != MINF && fabs(depthCenter - currentDepth) < sigmaR)
{
const float weight = gaussD(curSigma, m - x, n - y);
sumWeight += weight;
sum += weight*currentDepth;
}
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
void CUDAImageUtil::adaptiveGaussFilterDepthMap(float* d_output, const float* d_input, float sigmaD, float sigmaR, float adaptFactor, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
adaptiveGaussFilterDepthMap_Kernel << <gridSize, blockSize >> >(d_output, d_input, sigmaD, sigmaR, width, height, adaptFactor);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
__global__ void adaptiveGaussFilterIntensity_Kernel(float* d_output, const float* d_input, const float* d_depth, float sigmaD,
unsigned int width, unsigned int height, float adaptFactor)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
float sum = 0.0f;
float sumWeight = 0.0f;
d_output[y*width + x] = MINF; //(should not be used in the case of no valid depth)
const float depthCenter = d_depth[y*width + x];
if (depthCenter != MINF)
{
const float curSigma = sigmaD / depthCenter * adaptFactor;
const int kernelRadius = (int)ceil(2.0*curSigma);
for (int m = x - kernelRadius; m <= x + kernelRadius; m++)
{
for (int n = y - kernelRadius; n <= y + kernelRadius; n++)
{
if (m >= 0 && n >= 0 && m < width && n < height)
{
const float currentDepth = d_depth[n*width + m];
if (currentDepth != MINF) // && fabs(depthCenter - currentDepth) < sigmaR)
{
const float current = d_input[n*width + m];
const float weight = gaussD(curSigma, m - x, n - y);
sumWeight += weight;
sum += weight*current;
}
}
}
}
}
if (sumWeight > 0.0f) d_output[y*width + x] = sum / sumWeight;
}
void CUDAImageUtil::adaptiveGaussFilterIntensity(float* d_output, const float* d_input, const float* d_depth, float sigmaD, float adaptFactor, unsigned int width, unsigned int height)
{
const dim3 gridSize((width + T_PER_BLOCK - 1) / T_PER_BLOCK, (height + T_PER_BLOCK - 1) / T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
adaptiveGaussFilterIntensity_Kernel << <gridSize, blockSize >> >(d_output, d_input, d_depth, sigmaD, width, height, adaptFactor);
#ifdef _DEBUG
cutilSafeCall(cudaDeviceSynchronize());
cutilCheckMsg(__FUNCTION__);
#endif
}
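// Example usage (a minimal host-side sketch; the buffer names d_depth, d_depthFiltered,
// d_points, d_normals and the float4x4 intrinsicsInv are assumptions, not part of this file):
//   CUDAImageUtil::gaussFilterDepthMap(d_depthFiltered, d_depth, 2.0f, 0.05f, width, height);
//   CUDAImageUtil::convertDepthFloatToCameraSpaceFloat4(d_points, d_depthFiltered, intrinsicsInv, width, height);
//   CUDAImageUtil::computeNormals(d_normals, d_points, width, height);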
|
38f881cc1a034848b3307ad890cf778a22cf8fc0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*
*Computer Engineering Group, Heidelberg University - GPU Computing Exercise 04
*
* Group : TBD
*
* File : main.cu
*
* Purpose : Memory Operations Benchmark
*
******************************************************************************/
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <chCommandLine.h>
#include <chTimer.hpp>
#include <stdio.h>
#include <assert.h>
using namespace std;
const static int DEFAULT_MEM_SIZE = 10*1024*1024; // 10 MB
const static int DEFAULT_NUM_ITERATIONS = 1000;
const static int DEFAULT_BLOCK_DIM = 128;
const static int DEFAULT_GRID_DIM = 16;
//
// Function Prototypes
//
void printHelp(char *);
//
// Test Kernel
//
__global__ void
globalMem2SharedMem(float * d_memoryA, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Read global memory (coalesce) to shared memory */
/* Avoid bank conflicts */
for(int i = iID; i < iNumElements; i += iNumThreads)
s_memoryA[i] = d_memoryA[i];
}
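/* Note: the copy indexes the per-block shared array with the global index i, so each block
 * touches a strided subset of it; this assumes the dynamic shared allocation passed at
 * launch covers iSize bytes, which must also fit within the per-block shared memory limit
 * of the device. The same assumption applies to the kernels below. */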
__global__ void
SharedMem2globalMem(float * d_memoryA, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Write shared memory (coalesced) back to global memory */
for(int i = iID; i < iNumElements; i += iNumThreads)
d_memoryA[i] = s_memoryA[i];
}
__global__ void
SharedMem2Registers(float * outFloat, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Variable in register */
float r_var;
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Read shared memory into a register */
for(int i = iID; i < iNumElements; i += iNumThreads)
r_var = s_memoryA[i];
/* Conditionally assign register var, so it won't get optimized */
if(iID == 0) outFloat[0] = r_var;
}
__global__ void
Registers2SharedMem(float * outFloat, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Variable in register */
float r_var;
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Write the register value to shared memory */
for(int i = iID; i < iNumElements; i += iNumThreads)
s_memoryA[i] = r_var;
/* Conditionally assign register var, so it won't get optimized */
if(iID == 0) outFloat[0] = r_var;
}
__global__ void
bankConflictsRead(float *outFloat, int iStride, unsigned long long *ullTime)
{
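/* A single warp of 32 threads reads shared memory with a configurable stride;
   strides that map several lanes to the same bank serialize the accesses, so the
   clock64() difference measured below grows with the number of bank conflicts. */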
/* Static size of shared memory */
__shared__ float s_memoryA[2024];
/* Variable in register */
float r_var;
/* Start measure clock cycles */
unsigned long long startTime = clock64();
/* Access data from shared memory to register */
r_var = s_memoryA[threadIdx.x*iStride];
/* End measure clock cycles */
*ullTime = clock64() - startTime;
/* Conditionally assign register var, so it won't get optimized */
if(threadIdx.x == 0) outFloat[0] = r_var;
}
//
// Main
//
int
main ( int argc, char * argv[] )
{
// Show Help
bool optShowHelp = chCommandLineGetBool("h", argc, argv);
if ( !optShowHelp )
optShowHelp = chCommandLineGetBool("help", argc, argv);
if ( optShowHelp ) {
printHelp ( argv[0] );
exit (0);
}
std::cout << "***" << std::endl
<< "*** Starting ..." << std::endl
<< "***" << std::endl;
ChTimer kernelTimer;
//
// Get kernel launch parameters and configuration
//
int optNumIterations = 0,
optBlockSize = 0,
optGridSize = 0;
// Number of Iterations
chCommandLineGet<int> ( &optNumIterations,"i", argc, argv );
chCommandLineGet<int> ( &optNumIterations,"iterations", argc, argv );
optNumIterations = ( optNumIterations != 0 ) ? optNumIterations : DEFAULT_NUM_ITERATIONS;
// Block Dimension / Threads per Block
chCommandLineGet <int> ( &optBlockSize,"t", argc, argv );
chCommandLineGet <int> ( &optBlockSize,"threads-per-block", argc, argv );
optBlockSize = optBlockSize != 0 ? optBlockSize : DEFAULT_BLOCK_DIM;
if ( optBlockSize > 1024 ) {
std::cout << "\033[31m***" << std::cout
<< "*** Error - The number of threads per block is too big"
<< std::endl
<< "***\033[0m" << std::endl;
exit(-1);
}
// Grid Dimension
chCommandLineGet <int> ( &optGridSize,"g", argc, argv );
chCommandLineGet <int> ( &optGridSize,"grid-dim", argc, argv );
optGridSize = optGridSize != 0 ? optGridSize : DEFAULT_GRID_DIM;
dim3 grid_dim = dim3 ( optGridSize );
dim3 block_dim = dim3 ( optBlockSize );
int optModulo = 32*1024; // modulo in access pattern for conflict test
chCommandLineGet <int> ( &optModulo,"mod", argc, argv );
int optStride = 1; // stride in access pattern for conflict test
chCommandLineGet <int> ( &optStride,"stride", argc, argv );
// Memory size
int optMemorySize = 0;
chCommandLineGet <int> ( &optMemorySize, "s", argc, argv );
chCommandLineGet <int> ( &optMemorySize, "size", argc, argv );
optMemorySize = optMemorySize != 0 ? optMemorySize : DEFAULT_MEM_SIZE;
//
// Device Memory
//
float* d_memoryA = NULL;
hipMalloc ( &d_memoryA, static_cast <size_t> ( optMemorySize ) ); // optMemorySize is in bytes
float *outFloat = NULL; // dummy variable to prevent compiler optimizations
hipMalloc ( &outFloat, static_cast <size_t> ( sizeof ( float ) ) );
long hClocks = 0;
long *dClocks = NULL;
hipMalloc ( &dClocks, sizeof ( long ) );
if ( d_memoryA == NULL || dClocks == NULL )
{
std::cout << "\033[31m***" << std::endl
<< "*** Error - Memory allocation failed" << std::endl
<< "***\033[0m" << std::endl;
exit (-1);
}
int shared_dim = optMemorySize;
unsigned long long ullTime = 0;
unsigned long long ullTime_it = 0;
unsigned long long *d_ullTime;
hipMalloc(&d_ullTime, sizeof(unsigned long long));
//
// Tests
//
std::cout << "Starting kernel: " << grid_dim.x << "x" << block_dim.x << " threads, " << optMemorySize << "B shared memory" << ", " << optNumIterations << " iterations" << std::endl;
kernelTimer.start();
for ( int i = 0; i < optNumIterations; i++ )
{
//
// Launch Kernel
//
if ( chCommandLineGetBool ( "global2shared", argc, argv ) )
{
hipLaunchKernelGGL(( globalMem2SharedMem) , dim3(grid_dim), dim3(block_dim), shared_dim, 0,
d_memoryA, optMemorySize);
}
else if ( chCommandLineGetBool ( "shared2global", argc, argv ) )
{
hipLaunchKernelGGL(( SharedMem2globalMem) , dim3(grid_dim), dim3(block_dim), shared_dim, 0,
d_memoryA, optMemorySize);
}
else if ( chCommandLineGetBool ( "shared2register", argc, argv ) )
{
hipLaunchKernelGGL(( SharedMem2Registers) , dim3(grid_dim), dim3(block_dim), shared_dim, 0,
outFloat, optMemorySize);
}
else if ( chCommandLineGetBool ( "register2shared", argc, argv ) )
{
hipLaunchKernelGGL(( Registers2SharedMem) , dim3(grid_dim), dim3(block_dim), shared_dim, 0,
outFloat, optMemorySize);
}
else if ( chCommandLineGetBool ( "shared2register_conflict", argc, argv ) )
{
hipLaunchKernelGGL(( bankConflictsRead) , dim3(1), dim3(32) , 0, 0,
outFloat, optStride, d_ullTime);
hipMemcpy(&ullTime_it, d_ullTime, sizeof(unsigned long long), hipMemcpyDeviceToHost);
ullTime += ullTime_it;
}
}
// Mandatory synchronize after all kernel launches
hipDeviceSynchronize();
kernelTimer.stop();
hipError_t hipErr = hipGetLastError();
if ( hipErr != hipSuccess )
{
std::cout << "\033[31m***" << std::endl
<< "***ERROR*** " << hipErr << " - " << hipGetErrorString(hipErr)
<< std::endl
<< "***\033[0m" << std::endl;
return -1;
}
// Print Measurement Results
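// bw is reported in GB/s: (optMemorySize * grid_dim.x) bytes divided by the average kernel time per iteration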
if ( chCommandLineGetBool ( "global2shared", argc, argv ) ) {
std::cout << "Copy global->shared, size=" << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
//std::cout << ", time=" << kernelTimer.getTime(optNumIterations) <<
std::cout.precision ( 2 );
std::cout << ", bw=" << std::fixed << std::setw(6) << ( optMemorySize * grid_dim.x ) / kernelTimer.getTime(optNumIterations) / (1E09) << "GB/s" << std::endl;
}
if ( chCommandLineGetBool ( "shared2global", argc, argv ) ) {
std::cout << "Copy shared->global, size=" << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
//std::cout << ", time=" << kernelTimer.getTime(optNumIterations) <<
std::cout.precision ( 2 );
std::cout << ", bw=" << std::fixed << std::setw(6) << ( optMemorySize * grid_dim.x ) / kernelTimer.getTime(optNumIterations) / (1E09) << "GB/s" << std::endl;
}
if ( chCommandLineGetBool ( "shared2register", argc, argv ) ) {
std::cout << "Copy shared->register, size=" << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
//std::cout << ", time=" << kernelTimer.getTime(optNumIterations) <<
std::cout.precision ( 2 );
std::cout << ", bw=" << std::fixed << std::setw(6) << ( optMemorySize * grid_dim.x ) / kernelTimer.getTime(optNumIterations) / (1E09) << "GB/s" << std::endl;
}
if ( chCommandLineGetBool ( "register2shared", argc, argv ) ) {
std::cout << "Copy register->shared, size=" << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
//std::cout << ", time=" << kernelTimer.getTime(optNumIterations) <<
std::cout.precision ( 2 );
std::cout << ", bw=" << std::fixed << std::setw(6) << ( optMemorySize * grid_dim.x ) / kernelTimer.getTime(optNumIterations) / (1E09) << "GB/s" << std::endl;
}
if ( chCommandLineGetBool ( "shared2register_conflict", argc, argv ) ) {
if ( chCommandLineGetBool ( "shared2register_conflict", argc, argv ) ) {
hipError_t error = hipMemcpy ( &hClocks, dClocks, sizeof ( long ), hipMemcpyDeviceToHost );
if ( error != hipSuccess) {
fprintf ( stderr, "hipMemcpy failed: %s\n", hipGetErrorString ( error ) );
return 1;
}
}
std::cout << "Shared memory bank conflict test, size=1024, gDim=1, bDim=32"; // << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
std::cout << ", stride=" << std::setw(6) << optStride << ", modulo=" << std::setw(6) << optModulo;
std::cout << ", clocks=" << std::setw(10) << ullTime / optNumIterations << std::endl; //hClocks << std::endl;
}
return 0;
}
void
printHelp(char * programName)
{
std::cout
<< "Usage: " << std::endl
<< " " << programName << " [-p] [-s <memory_size>] [-i <num_iterations>]" << std::endl
<< " [-t <threads_per_block>] [-g <blocks_per_grid]" << std::endl
<< " [-stride <stride>] [-offset <offset>]" << std::endl
<< " --{global2shared,shared2global,shared2register,register2shared,shared2register_conflict}" << std::endl
<< " Run kernel analyzing shared memory performance" << std::endl
<< " -s <memory_size>|--size <memory_size>" << std::endl
<< " The amount of memory to allcate" << std::endl
<< " -t <threads_per_block>|--threads-per-block <threads_per_block>" << std::endl
<< " The number of threads per block" << std::endl
<< " -g <blocks_per_grid>|--grid-dim <blocks_per_grid>" << std::endl
<< " The number of blocks per grid" << std::endl
<< " -i <num_iterations>|--iterations <num_iterations>" << std::endl
<< " The number of iterations to launch the kernel" << std::endl
<< " -stride <stride>" << std::endl
<< " Stride parameter for global-stride test. Not that size parameter is ignored then." << std::endl
<< " -offset <offset>" << std::endl
<< " Offset parameter for global-offset test. Not that size parameter is ignored then." << std::endl
<< "" << std::endl;
}
|
38f881cc1a034848b3307ad890cf778a22cf8fc0.cu
|
/******************************************************************************
*
*Computer Engineering Group, Heidelberg University - GPU Computing Exercise 04
*
* Group : TBD
*
* File : main.cu
*
* Purpose : Memory Operations Benchmark
*
******************************************************************************/
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <chCommandLine.h>
#include <chTimer.hpp>
#include <stdio.h>
#include <assert.h>
using namespace std;
const static int DEFAULT_MEM_SIZE = 10*1024*1024; // 10 MB
const static int DEFAULT_NUM_ITERATIONS = 1000;
const static int DEFAULT_BLOCK_DIM = 128;
const static int DEFAULT_GRID_DIM = 16;
//
// Function Prototypes
//
void printHelp(char *);
//
// Test Kernel
//
__global__ void
globalMem2SharedMem(float * d_memoryA, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Read global memory (coalesced) into shared memory */
/* Avoid bank conflicts */
for(int i = iID; i < iNumElements; i += iNumThreads)
s_memoryA[i] = d_memoryA[i];
}
__global__ void
SharedMem2globalMem(float * d_memoryA, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Write shared memory back to global memory (coalesced) */
for(int i = iID; i < iNumElements; i += iNumThreads)
d_memoryA[i] = s_memoryA[i];
}
__global__ void
SharedMem2Registers(float * outFloat, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Variable in register */
float r_var;
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Read shared memory into a register */
for(int i = iID; i < iNumElements; i += iNumThreads)
r_var = s_memoryA[i];
/* Conditionally assign register var, so it won't get optimized */
if(iID == 0) outFloat[0] = r_var;
}
__global__ void
Registers2SharedMem(float * outFloat, int iSize)
{
/* Amount of shared memory is determined by host call */
extern __shared__ float s_memoryA[];
/* Variable in register */
float r_var = 0.0f;
/* Generate global index */
int iID = blockDim.x * blockIdx.x + threadIdx.x;
/* Get the number of available threads */
int iNumThreads = blockDim.x * gridDim.x;
/* Calculate number of elements */
int iNumElements = iSize / sizeof(float);
/* Write the register value to shared memory */
for(int i = iID; i < iNumElements; i += iNumThreads)
s_memoryA[i] = r_var;
/* Conditionally assign register var, so it won't get optimized */
if(iID == 0) outFloat[0] = r_var;
}
__global__ void
bankConflictsRead(float *outFloat, int iStride, unsigned long long *ullTime)
{
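/* A single warp of 32 threads reads shared memory with a configurable stride;
   strides that map several lanes to the same bank serialize the accesses, so the
   clock64() difference measured below grows with the number of bank conflicts. */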
/* Static size of shared memory */
__shared__ float s_memoryA[2024];
/* Variable in register */
float r_var;
/* Start measure clock cycles */
unsigned long long startTime = clock64();
/* Access data from shared memory to register */
r_var = s_memoryA[threadIdx.x*iStride];
/* End measure clock cycles */
*ullTime = clock64() - startTime;
/* Conditionally assign register var, so it won't get optimized */
if(threadIdx.x == 0) outFloat[0] = r_var;
}
//
// Main
//
int
main ( int argc, char * argv[] )
{
// Show Help
bool optShowHelp = chCommandLineGetBool("h", argc, argv);
if ( !optShowHelp )
optShowHelp = chCommandLineGetBool("help", argc, argv);
if ( optShowHelp ) {
printHelp ( argv[0] );
exit (0);
}
std::cout << "***" << std::endl
<< "*** Starting ..." << std::endl
<< "***" << std::endl;
ChTimer kernelTimer;
//
// Get kernel launch parameters and configuration
//
int optNumIterations = 0,
optBlockSize = 0,
optGridSize = 0;
// Number of Iterations
chCommandLineGet<int> ( &optNumIterations,"i", argc, argv );
chCommandLineGet<int> ( &optNumIterations,"iterations", argc, argv );
optNumIterations = ( optNumIterations != 0 ) ? optNumIterations : DEFAULT_NUM_ITERATIONS;
// Block Dimension / Threads per Block
chCommandLineGet <int> ( &optBlockSize,"t", argc, argv );
chCommandLineGet <int> ( &optBlockSize,"threads-per-block", argc, argv );
optBlockSize = optBlockSize != 0 ? optBlockSize : DEFAULT_BLOCK_DIM;
if ( optBlockSize > 1024 ) {
std::cout << "\033[31m***" << std::cout
<< "*** Error - The number of threads per block is too big"
<< std::endl
<< "***\033[0m" << std::endl;
exit(-1);
}
// Grid Dimension
chCommandLineGet <int> ( &optGridSize,"g", argc, argv );
chCommandLineGet <int> ( &optGridSize,"grid-dim", argc, argv );
optGridSize = optGridSize != 0 ? optGridSize : DEFAULT_GRID_DIM;
dim3 grid_dim = dim3 ( optGridSize );
dim3 block_dim = dim3 ( optBlockSize );
int optModulo = 32*1024; // modulo in access pattern for conflict test
chCommandLineGet <int> ( &optModulo,"mod", argc, argv );
int optStride = 1; // stride in access pattern for conflict test
chCommandLineGet <int> ( &optStride,"stride", argc, argv );
// Memory size
int optMemorySize = 0;
chCommandLineGet <int> ( &optMemorySize, "s", argc, argv );
chCommandLineGet <int> ( &optMemorySize, "size", argc, argv );
optMemorySize = optMemorySize != 0 ? optMemorySize : DEFAULT_MEM_SIZE;
//
// Device Memory
//
float* d_memoryA = NULL;
cudaMalloc ( &d_memoryA, static_cast <size_t> ( optMemorySize ) ); // optMemorySize is in bytes
float *outFloat = NULL; // dummy variable to prevent compiler optimizations
cudaMalloc ( &outFloat, static_cast <size_t> ( sizeof ( float ) ) );
long hClocks = 0;
long *dClocks = NULL;
cudaMalloc ( &dClocks, sizeof ( long ) );
if ( d_memoryA == NULL || dClocks == NULL )
{
std::cout << "\033[31m***" << std::endl
<< "*** Error - Memory allocation failed" << std::endl
<< "***\033[0m" << std::endl;
exit (-1);
}
int shared_dim = optMemorySize;
unsigned long long ullTime = 0;
unsigned long long ullTime_it = 0;
unsigned long long *d_ullTime;
cudaMalloc(&d_ullTime, sizeof(unsigned long long));
//
// Tests
//
std::cout << "Starting kernel: " << grid_dim.x << "x" << block_dim.x << " threads, " << optMemorySize << "B shared memory" << ", " << optNumIterations << " iterations" << std::endl;
kernelTimer.start();
for ( int i = 0; i < optNumIterations; i++ )
{
//
// Launch Kernel
//
if ( chCommandLineGetBool ( "global2shared", argc, argv ) )
{
globalMem2SharedMem <<< grid_dim, block_dim, shared_dim>>>
(d_memoryA, optMemorySize);
}
else if ( chCommandLineGetBool ( "shared2global", argc, argv ) )
{
SharedMem2globalMem <<< grid_dim, block_dim, shared_dim>>>
(d_memoryA, optMemorySize);
}
else if ( chCommandLineGetBool ( "shared2register", argc, argv ) )
{
SharedMem2Registers <<< grid_dim, block_dim, shared_dim>>>
(outFloat, optMemorySize);
}
else if ( chCommandLineGetBool ( "register2shared", argc, argv ) )
{
Registers2SharedMem <<< grid_dim, block_dim, shared_dim>>>
(outFloat, optMemorySize);
}
else if ( chCommandLineGetBool ( "shared2register_conflict", argc, argv ) )
{
bankConflictsRead <<< 1, 32 >>>
(outFloat, optStride, d_ullTime);
cudaMemcpy(&ullTime_it, d_ullTime, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
ullTime += ullTime_it;
}
}
// Mandatory synchronize after all kernel launches
cudaDeviceSynchronize();
kernelTimer.stop();
cudaError_t cudaError = cudaGetLastError();
if ( cudaError != cudaSuccess )
{
std::cout << "\033[31m***" << std::endl
<< "***ERROR*** " << cudaError << " - " << cudaGetErrorString(cudaError)
<< std::endl
<< "***\033[0m" << std::endl;
return -1;
}
// Print Measurement Results
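// bw is reported in GB/s: (optMemorySize * grid_dim.x) bytes divided by the average kernel time per iteration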
if ( chCommandLineGetBool ( "global2shared", argc, argv ) ) {
std::cout << "Copy global->shared, size=" << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
//std::cout << ", time=" << kernelTimer.getTime(optNumIterations) <<
std::cout.precision ( 2 );
std::cout << ", bw=" << std::fixed << std::setw(6) << ( optMemorySize * grid_dim.x ) / kernelTimer.getTime(optNumIterations) / (1E09) << "GB/s" << std::endl;
}
if ( chCommandLineGetBool ( "shared2global", argc, argv ) ) {
std::cout << "Copy shared->global, size=" << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
//std::cout << ", time=" << kernelTimer.getTime(optNumIterations) <<
std::cout.precision ( 2 );
std::cout << ", bw=" << std::fixed << std::setw(6) << ( optMemorySize * grid_dim.x ) / kernelTimer.getTime(optNumIterations) / (1E09) << "GB/s" << std::endl;
}
if ( chCommandLineGetBool ( "shared2register", argc, argv ) ) {
std::cout << "Copy shared->register, size=" << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
//std::cout << ", time=" << kernelTimer.getTime(optNumIterations) <<
std::cout.precision ( 2 );
std::cout << ", bw=" << std::fixed << std::setw(6) << ( optMemorySize * grid_dim.x ) / kernelTimer.getTime(optNumIterations) / (1E09) << "GB/s" << std::endl;
}
if ( chCommandLineGetBool ( "register2shared", argc, argv ) ) {
std::cout << "Copy register->shared, size=" << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
//std::cout << ", time=" << kernelTimer.getTime(optNumIterations) <<
std::cout.precision ( 2 );
std::cout << ", bw=" << std::fixed << std::setw(6) << ( optMemorySize * grid_dim.x ) / kernelTimer.getTime(optNumIterations) / (1E09) << "GB/s" << std::endl;
}
if ( chCommandLineGetBool ( "shared2register_conflict", argc, argv ) ) {
if ( chCommandLineGetBool ( "shared2register_conflict", argc, argv ) ) {
cudaError_t error = cudaMemcpy ( &hClocks, dClocks, sizeof ( long ), cudaMemcpyDeviceToHost );
if ( error != cudaSuccess) {
fprintf ( stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString ( error ) );
return 1;
}
}
std::cout << "Shared memory bank conflict test, size=1024, gDim=1, bDim=32"; // << std::setw(10) << optMemorySize << ", gDim=" << std::setw(5) << grid_dim.x << ", bDim=" << std::setw(5) << block_dim.x;
std::cout << ", stride=" << std::setw(6) << optStride << ", modulo=" << std::setw(6) << optModulo;
std::cout << ", clocks=" << std::setw(10) << ullTime / optNumIterations << std::endl; //hClocks << std::endl;
}
return 0;
}
void
printHelp(char * programName)
{
std::cout
<< "Usage: " << std::endl
<< " " << programName << " [-p] [-s <memory_size>] [-i <num_iterations>]" << std::endl
<< " [-t <threads_per_block>] [-g <blocks_per_grid]" << std::endl
<< " [-stride <stride>] [-offset <offset>]" << std::endl
<< " --{global2shared,shared2global,shared2register,register2shared,shared2register_conflict}" << std::endl
<< " Run kernel analyzing shared memory performance" << std::endl
<< " -s <memory_size>|--size <memory_size>" << std::endl
<< " The amount of memory to allcate" << std::endl
<< " -t <threads_per_block>|--threads-per-block <threads_per_block>" << std::endl
<< " The number of threads per block" << std::endl
<< " -g <blocks_per_grid>|--grid-dim <blocks_per_grid>" << std::endl
<< " The number of blocks per grid" << std::endl
<< " -i <num_iterations>|--iterations <num_iterations>" << std::endl
<< " The number of iterations to launch the kernel" << std::endl
<< " -stride <stride>" << std::endl
<< " Stride parameter for global-stride test. Not that size parameter is ignored then." << std::endl
<< " -offset <offset>" << std::endl
<< " Offset parameter for global-offset test. Not that size parameter is ignored then." << std::endl
<< "" << std::endl;
}
|
e12341805e238664d00fcb3c1b1866d721a29f24.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
namespace {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// The number of cuda threads to use. 512 is used for backward compatibility
constexpr int ROI_CUDA_NUM_THREADS = 512;
// The maximum number of blocks to use in the default kernel call.
constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096;
/**
* @brief Compute the number of blocks needed to run N threads.
*/
inline int ROI_GET_BLOCKS(const int N) {
return ::max(
::min(
(N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS,
ROI_MAXIMUM_NUM_BLOCKS),
// Use at least 1 block, since CUDA does not allow empty block
1);
}
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
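// (lx, ly) are the fractional offsets inside the cell; each corner value below is
// weighted by the area of the sub-rectangle opposite to it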
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForwardKernel(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T* w1,
T* w2,
T* w3,
T* w4,
int* x_low,
int* x_high,
int* y_low,
int* y_high,
const int /*index*/ /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
*w1 = *w2 = *w3 = *w4 = 0.;
*x_low = *x_high = *y_low = *y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = (T)*y_low;
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = (T)*x_low;
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low;
T lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <>
inline __device__ double gpu_atomic_add(const double val, double* address) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return val;
}
template <typename T>
__global__ void RoIAlignBackwardKernel(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
&w1,
&w2,
&w3,
&w4,
&x_low,
&x_high,
&y_low,
&y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
/*
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
*/
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
} // namespace
at::Tensor ROIAlignForwardCUDA(
const at::Tensor input,
const at::Tensor rois,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(input.is_contiguous());
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(input.ndimension() == 4);
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto proposals = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
auto output = input.type().tensor({proposals, channels, pooled_height, pooled_width});
auto count = output.numel();
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignForwardCUDA", ([&] {
hipLaunchKernelGGL(( RoIAlignForwardKernel<scalar_t>)
, dim3(ROI_GET_BLOCKS(count)),
dim3(ROI_CUDA_NUM_THREADS),
0,
at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA(),
count,
input.data<scalar_t>(),
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.data<scalar_t>(),
output.data<scalar_t>());
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return output;
}
at::Tensor ROIAlignBackwardCUDA(
const at::Tensor rois,
const at::Tensor grad_output,
int64_t b_size,
int64_t channels,
int64_t height,
int64_t width,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto roi_cols = rois.size(1);
AT_ASSERT(roi_cols == 4 || roi_cols == 5);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
// gradient wrt input features
auto grad_in = rois.type().tensor({b_size, channels, height, width}).zero_();
auto num_rois = rois.size(0);
auto count = grad_output.numel();
AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlignBackwardCUDA", ([&] {
hipLaunchKernelGGL(( RoIAlignBackwardKernel<scalar_t>)
, dim3(ROI_GET_BLOCKS(count)),
dim3(ROI_CUDA_NUM_THREADS),
0,
at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA(),
count,
grad_output.data<scalar_t>(),
num_rois,
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_in.data<scalar_t>(),
rois.data<scalar_t>());
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return grad_in;
}
|
e12341805e238664d00fcb3c1b1866d721a29f24.cu
|
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
namespace {
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// The number of cuda threads to use. 512 is used for backward compatibility
constexpr int ROI_CUDA_NUM_THREADS = 512;
// The maximum number of blocks to use in the default kernel call.
constexpr int ROI_MAXIMUM_NUM_BLOCKS = 4096;
/**
* @brief Compute the number of blocks needed to run N threads.
*/
inline int ROI_GET_BLOCKS(const int N) {
return std::max(
std::min(
(N + ROI_CUDA_NUM_THREADS - 1) / ROI_CUDA_NUM_THREADS,
ROI_MAXIMUM_NUM_BLOCKS),
// Use at least 1 block, since CUDA does not allow empty block
1);
}
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
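// (lx, ly) are the fractional offsets inside the cell; each corner value below is
// weighted by the area of the sub-rectangle opposite to it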
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForwardKernel(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T* w1,
T* w2,
T* w3,
T* w4,
int* x_low,
int* x_high,
int* y_low,
int* y_high,
const int /*index*/ /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
*w1 = *w2 = *w3 = *w4 = 0.;
*x_low = *x_high = *y_low = *y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = (T)*y_low;
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = (T)*x_low;
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low;
T lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <>
inline __device__ double gpu_atomic_add(const double val, double* address) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull;
unsigned long long int assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return val;
}
template <typename T>
__global__ void RoIAlignBackwardKernel(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
&w1,
&w2,
&w3,
&w4,
&x_low,
&x_high,
&y_low,
&y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
/*
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
*/
gpu_atomic_add(
static_cast<T>(g1), offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(
static_cast<T>(g2), offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(
static_cast<T>(g3), offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(
static_cast<T>(g4), offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
} // namespace
at::Tensor ROIAlignForwardCUDA(
const at::Tensor input,
const at::Tensor rois,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(input.is_contiguous());
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(input.ndimension() == 4);
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto proposals = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
auto output = input.type().tensor({proposals, channels, pooled_height, pooled_width});
auto count = output.numel();
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignForwardCUDA", ([&] {
RoIAlignForwardKernel<scalar_t>
<<<ROI_GET_BLOCKS(count),
ROI_CUDA_NUM_THREADS,
0,
at::globalContext().getCurrentCUDAStream()>>>(
count,
input.data<scalar_t>(),
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.data<scalar_t>(),
output.data<scalar_t>());
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return output;
}
at::Tensor ROIAlignBackwardCUDA(
const at::Tensor rois,
const at::Tensor grad_output,
int64_t b_size,
int64_t channels,
int64_t height,
int64_t width,
int64_t pooled_height,
int64_t pooled_width,
double spatial_scale,
int64_t sampling_ratio) {
AT_ASSERT(rois.is_contiguous());
AT_ASSERT(rois.ndimension() == 2);
AT_ASSERT(rois.size(1) == 5);
auto roi_cols = rois.size(1);
AT_ASSERT(roi_cols == 4 || roi_cols == 5);
// Output Tensor is (num_rois, C, pooled_height, pooled_width)
// gradient wrt input features
auto grad_in = rois.type().tensor({b_size, channels, height, width}).zero_();
auto num_rois = rois.size(0);
auto count = grad_output.numel();
AT_DISPATCH_FLOATING_TYPES(rois.type(), "ROIAlignBackwardCUDA", ([&] {
RoIAlignBackwardKernel<scalar_t>
<<<ROI_GET_BLOCKS(count),
ROI_CUDA_NUM_THREADS,
0,
at::globalContext().getCurrentCUDAStream()>>>(
count,
grad_output.data<scalar_t>(),
num_rois,
static_cast<scalar_t>(spatial_scale),
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_in.data<scalar_t>(),
rois.data<scalar_t>());
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return grad_in;
}
|
cca74c752396f8194daa9a207627aeb830217ba9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <random>
#include <hip/hip_runtime.h>
#include "kernels_hip.cuh"
#include "utils.h"
void Verify(float *out_vals,
int *out_indices,
int out_num,
float *out_vals_ref,
int *out_indices_ref,
int out_num_ref) {
if (out_num != out_num_ref) {
printf("out_num(%d) and out_num_ref(%d) doesn't match\n", out_num, out_num_ref);
return;
}
printf("out_num(%d) and out_num_ref(%d) matches\n", out_num, out_num_ref);
for (int i = 0; i < out_num; ++i) {
if (out_indices[i] != out_indices_ref[i]) {
printf("indices(%d) and ref(%d) differed at idx %d\n", out_indices[i], out_indices_ref[i], i);
}
}
for (int i = 0; i < out_num; ++i) {
if (out_vals[i] != out_vals_ref[i]) {
printf("vals(%f) and ref(%f) differed at idx %d\n", out_vals[i], out_vals_ref[i], i);
}
}
}
float GetMean(float *in, int num_items) {
float mean = 0.0f;
for (int i = 0; i < num_items; ++i) {
mean += abs(in[i]);
}
mean /= num_items;
return mean;
}
float GetMax(float *in, int num_items) {
float max = 0.0f;
for (int i = 0; i < num_items; ++i) {
if (max < abs(in[i])) {
max = abs(in[i]);
}
}
return max;
}
int CountNonZero(float *in, float threshold, int num_items) {
int acc = 0;
for (int i = 0; i < num_items; ++i) {
if (in[i] > threshold) {
acc += 1;
}
}
return acc;
}
int GetSparseTensor(float *in, float *out_vals, int *out_indices, float threshold, int num_items) {
int acc = 0;
for (int i = 0; i < num_items; ++i) {
if (abs(in[i]) > threshold) {
out_vals[acc] = in[i];
out_indices[acc] = i;
acc += 1;
}
}
return acc;
}
void TrimmedTopKRef(float *in, float *out_vals_ref, int *out_indices_ref, int *out_num_ref, int k, float eta, int num_items) {
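/* CPU reference: bisect a ratio in [0, 1] that places the threshold between mean(|in|)
   and max(|in|) until the number of elements above it lies in (k, 2k) or the search
   interval shrinks below eta, then emit the surviving values and indices. */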
float mean = GetMean(in, num_items);
float max = GetMax(in, num_items);
float l = 0.0f;
float r = 1.0f;
float threshold = 0.0f;
int nnz = 0;
while (r - l > eta) {
float ratio = l + (r-l)/2;
threshold = mean + ratio * (max - mean);
printf("threshold %f\n", threshold);
nnz = CountNonZero(in, threshold, num_items);
if (nnz > k && nnz < 2*k) {
break;
}
else {
if (nnz < k/2) {
r = ratio;
}
else {
l = ratio;
}
}
}
out_num_ref[0] = GetSparseTensor(in, out_vals_ref, out_indices_ref, threshold, num_items);
}
void TopKLastStepRef(float *in, float *out_vals_ref, int *out_indices_ref, int *out_num_ref, float threshold, int num_items) {
out_num_ref[0] = GetSparseTensor(in, out_vals_ref, out_indices_ref, threshold, num_items);
}
int main(int argc, char *argv[])
{
std::default_random_engine generator;
std::normal_distribution<float> distribution(0.0f, 10.0f);
int num_items = 1024*1024*atoi(argv[1]);
float eta = 0.0001f;
hipcub::CachingDeviceAllocator g_allocator(true);
float* in = new float[num_items];
float* out_vals = new float[num_items];
int* out_indices = new int[num_items];
int* out_num = new int[1];
float* out_vals_ref = new float[num_items];
int* out_indices_ref = new int[num_items];
int* out_num_ref = new int[1];
for (int i = 0; i < num_items; ++i) {
in[i] = distribution(generator);
}
GpuTimer timer;
for (int i = 1; i < 6; ++i) {
int k = 1024*i;
timer.Start();
float threshold = TrimmedTopK<1024>(g_allocator, in, out_vals, out_indices, out_num, k, eta, num_items);
timer.Stop();
TopKLastStepRef(in, out_vals_ref, out_indices_ref, out_num_ref, threshold, num_items);
// The full comparison against TrimmedTopKRef is disabled because floating-point precision differs between the GPU and the CPU.
// TrimmedTopKRef(in, out_vals_ref, out_indices_ref, out_num_ref, k, eta, num_items);
printf("testing for k = %d\n", k);
Verify(out_vals, out_indices, out_num[0], out_vals_ref, out_indices_ref, out_num_ref[0]);
printf("elapsed milliseconds: %f\n", timer.ElapsedMillis());
printf("================================\n");
}
if (in) delete[] in;
if (out_vals) delete[] out_vals;
if (out_indices) delete[] out_indices;
if (out_num) delete[] out_num;
}
|
cca74c752396f8194daa9a207627aeb830217ba9.cu
|
#include <cstdio>
#include <cstdlib>
#include <random>
#include <cuda.h>
#include "kernels.cuh"
#include "utils.h"
void Verify(float *out_vals,
int *out_indices,
int out_num,
float *out_vals_ref,
int *out_indices_ref,
int out_num_ref) {
if (out_num != out_num_ref) {
printf("out_num(%d) and out_num_ref(%d) doesn't match\n", out_num, out_num_ref);
return;
}
printf("out_num(%d) and out_num_ref(%d) matches\n", out_num, out_num_ref);
for (int i = 0; i < out_num; ++i) {
if (out_indices[i] != out_indices_ref[i]) {
printf("indices(%d) and ref(%d) differed at idx %d\n", out_indices[i], out_indices_ref[i], i);
}
}
for (int i = 0; i < out_num; ++i) {
if (out_vals[i] != out_vals_ref[i]) {
printf("vals(%f) and ref(%f) differed at idx %d\n", out_vals[i], out_vals_ref[i], i);
}
}
}
float GetMean(float *in, int num_items) {
float mean = 0.0f;
for (int i = 0; i < num_items; ++i) {
mean += abs(in[i]);
}
mean /= num_items;
return mean;
}
float GetMax(float *in, int num_items) {
float max = 0.0f;
for (int i = 0; i < num_items; ++i) {
if (max < abs(in[i])) {
max = abs(in[i]);
}
}
return max;
}
int CountNonZero(float *in, float threshold, int num_items) {
int acc = 0;
for (int i = 0; i < num_items; ++i) {
if (in[i] > threshold) {
acc += 1;
}
}
return acc;
}
int GetSparseTensor(float *in, float *out_vals, int *out_indices, float threshold, int num_items) {
int acc = 0;
for (int i = 0; i < num_items; ++i) {
if (abs(in[i]) > threshold) {
out_vals[acc] = in[i];
out_indices[acc] = i;
acc += 1;
}
}
return acc;
}
void TrimmedTopKRef(float *in, float *out_vals_ref, int *out_indices_ref, int *out_num_ref, int k, float eta, int num_items) {
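/* CPU reference: bisect a ratio in [0, 1] that places the threshold between mean(|in|)
   and max(|in|) until the number of elements above it lies in (k, 2k) or the search
   interval shrinks below eta, then emit the surviving values and indices. */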
float mean = GetMean(in, num_items);
float max = GetMax(in, num_items);
float l = 0.0f;
float r = 1.0f;
float threshold = 0.0f;
int nnz = 0;
while (r - l > eta) {
float ratio = l + (r-l)/2;
threshold = mean + ratio * (max - mean);
printf("threshold %f\n", threshold);
nnz = CountNonZero(in, threshold, num_items);
if (nnz > k && nnz < 2*k) {
break;
}
else {
if (nnz < k/2) {
r = ratio;
}
else {
l = ratio;
}
}
}
out_num_ref[0] = GetSparseTensor(in, out_vals_ref, out_indices_ref, threshold, num_items);
}
void TopKLastStepRef(float *in, float *out_vals_ref, int *out_indices_ref, int *out_num_ref, float threshold, int num_items) {
out_num_ref[0] = GetSparseTensor(in, out_vals_ref, out_indices_ref, threshold, num_items);
}
int main(int argc, char *argv[])
{
std::default_random_engine generator;
std::normal_distribution<float> distribution(0.0f, 10.0f);
int num_items = 1024*1024*atoi(argv[1]);
float eta = 0.0001f;
cub::CachingDeviceAllocator g_allocator(true);
float* in = new float[num_items];
float* out_vals = new float[num_items];
int* out_indices = new int[num_items];
int* out_num = new int[1];
float* out_vals_ref = new float[num_items];
int* out_indices_ref = new int[num_items];
int* out_num_ref = new int[1];
for (int i = 0; i < num_items; ++i) {
in[i] = distribution(generator);
}
GpuTimer timer;
for (int i = 1; i < 6; ++i) {
int k = 1024*i;
timer.Start();
float threshold = TrimmedTopK<1024>(g_allocator, in, out_vals, out_indices, out_num, k, eta, num_items);
timer.Stop();
TopKLastStepRef(in, out_vals_ref, out_indices_ref, out_num_ref, threshold, num_items);
// The full comparison against TrimmedTopKRef is disabled because floating-point precision differs between the GPU and the CPU.
// TrimmedTopKRef(in, out_vals_ref, out_indices_ref, out_num_ref, k, eta, num_items);
printf("testing for k = %d\n", k);
Verify(out_vals, out_indices, out_num[0], out_vals_ref, out_indices_ref, out_num_ref[0]);
printf("elapsed milliseconds: %f\n", timer.ElapsedMillis());
printf("================================\n");
}
if (in) delete[] in;
if (out_vals) delete[] out_vals;
if (out_indices) delete[] out_indices;
if (out_num) delete[] out_num;
}
|
329bee43bd570cb9d0b90937407726a7704ed954.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by jiashuai on 19-1-23.
//
#include <thundergbm/builder/tree_builder.h>
#include "thundergbm/util/multi_device.h"
#include "thundergbm/util/device_lambda.cuh"
void TreeBuilder::update_tree() {
TIMED_FUNC(timerObj);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
auto& sp = this->sp[device_id];
auto& tree = this->trees[device_id];
auto sp_data = sp.device_data();
LOG(DEBUG) << sp;
int n_nodes_in_level = sp.size();
Tree::TreeNode *nodes_data = tree.nodes.device_data();
float_type rt_eps = param.rt_eps;
float_type lambda = param.lambda;
device_loop(n_nodes_in_level, [=]__device__(int i) {
float_type best_split_gain = sp_data[i].gain;
if (best_split_gain > rt_eps) {
//do split
if (sp_data[i].nid == -1) return;
int nid = sp_data[i].nid;
Tree::TreeNode &node = nodes_data[nid];
node.gain = best_split_gain;
Tree::TreeNode &lch = nodes_data[node.lch_index];//left child
Tree::TreeNode &rch = nodes_data[node.rch_index];//right child
lch.is_valid = true;
rch.is_valid = true;
node.split_feature_id = sp_data[i].split_fea_id;
GHPair p_missing_gh = sp_data[i].fea_missing_gh;
//todo process begin
node.split_value = sp_data[i].fval;
node.split_bid = sp_data[i].split_bid;
rch.sum_gh_pair = sp_data[i].rch_sum_gh;
if (sp_data[i].default_right) {
rch.sum_gh_pair = rch.sum_gh_pair + p_missing_gh;
node.default_right = true;
}
lch.sum_gh_pair = node.sum_gh_pair - rch.sum_gh_pair;
lch.calc_weight(lambda);
rch.calc_weight(lambda);
} else {
//set leaf
if (sp_data[i].nid == -1) return;
int nid = sp_data[i].nid;
Tree::TreeNode &node = nodes_data[nid];
node.is_leaf = true;
nodes_data[node.lch_index].is_valid = false;
nodes_data[node.rch_index].is_valid = false;
}
});
LOG(DEBUG) << tree.nodes;
});
}
void TreeBuilder::predict_in_training(int k) {
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
auto y_predict_data = y_predict[device_id].device_data() + k * n_instances;
auto nid_data = ins2node_id[device_id].device_data();
const Tree::TreeNode *nodes_data = trees[device_id].nodes.device_data();
auto lr = param.learning_rate;
device_loop(n_instances, [=]__device__(int i) {
int nid = nid_data[i];
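//if the assigned node was pruned, walk up to its nearest surviving ancestor first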
while (nid != -1 && (nodes_data[nid].is_pruned)) nid = nodes_data[nid].parent_index;
y_predict_data[i] += lr * nodes_data[nid].base_weight;
});
});
}
void TreeBuilder::init(const DataSet &dataset, const GBMParam &param) {
int n_available_device;
hipGetDeviceCount(&n_available_device);
CHECK_GE(n_available_device, param.n_device) << "only " << n_available_device
<< " GPUs available; please set correct number of GPUs to use";
FunctionBuilder::init(dataset, param);
this->n_instances = dataset.n_instances();
trees = vector<Tree>(param.n_device);
ins2node_id = MSyncArray<int>(param.n_device, n_instances);
sp = MSyncArray<SplitPoint>(param.n_device);
has_split = vector<bool>(param.n_device);
int n_outputs = param.num_class * n_instances;
y_predict = MSyncArray<float_type>(param.n_device, n_outputs);
gradients = MSyncArray<GHPair>(param.n_device, n_instances);
}
void TreeBuilder::ins2node_id_all_reduce(int depth) {
//get global ins2node id
{
SyncArray<int> local_ins2node_id(n_instances);
auto local_ins2node_id_data = local_ins2node_id.device_data();
auto global_ins2node_id_data = ins2node_id.front().device_data();
for (int d = 1; d < param.n_device; d++) {
local_ins2node_id.copy_from(ins2node_id[d]);
device_loop(n_instances, [=]__device__(int i) {
global_ins2node_id_data[i] = (global_ins2node_id_data[i] > local_ins2node_id_data[i]) ?
global_ins2node_id_data[i] : local_ins2node_id_data[i];
});
}
}
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
ins2node_id[device_id].copy_from(ins2node_id.front());
});
}
void TreeBuilder::split_point_all_reduce(int depth) {
TIMED_FUNC(timerObj);
//get global best split of each node
int n_nodes_in_level = 1 << depth;//2^depth
int nid_offset = (1 << depth) - 1;//2^depth - 1
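//sp_nid - nid_offset maps a node id in this level to its slot; for each node keep the
//candidate split with the largest gain reported by any device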
auto global_sp_data = sp.front().host_data();
vector<bool> active_sp(n_nodes_in_level);
for (int device_id = 0; device_id < param.n_device; device_id++) {
auto local_sp_data = sp[device_id].host_data();
for (int j = 0; j < sp[device_id].size(); j++) {
int sp_nid = local_sp_data[j].nid;
if (sp_nid == -1) continue;
int global_pos = sp_nid - nid_offset;
if (!active_sp[global_pos])
global_sp_data[global_pos] = local_sp_data[j];
else
global_sp_data[global_pos] = (global_sp_data[global_pos].gain >= local_sp_data[j].gain)
?
global_sp_data[global_pos] : local_sp_data[j];
active_sp[global_pos] = true;
}
}
//set inactive sp
for (int n = 0; n < n_nodes_in_level; n++) {
if (!active_sp[n])
global_sp_data[n].nid = -1;
}
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
sp[device_id].copy_from(sp.front());
});
LOG(DEBUG) << "global best split point = " << sp.front();
}
vector<Tree> TreeBuilder::build_approximate(const MSyncArray<GHPair> &gradients) {
vector<Tree> trees(param.tree_per_rounds);
TIMED_FUNC(timerObj);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
this->shards[device_id].column_sampling(param.column_sampling_rate);
});
for (int k = 0; k < param.tree_per_rounds; ++k) {
Tree &tree = trees[k];
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
this->ins2node_id[device_id].resize(n_instances);
this->gradients[device_id].set_device_data(const_cast<GHPair *>(gradients[device_id].device_data() + k * n_instances));
this->trees[device_id].init2(this->gradients[device_id], param);
});
for (int level = 0; level < param.depth; ++level) {
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
find_split(level, device_id);
});
split_point_all_reduce(level);
{
TIMED_SCOPE(timerObj, "apply sp");
update_tree();
update_ins2node_id();
{
LOG(TRACE) << "gathering ins2node id";
//get final result of the reset instance id to node id
bool has_split = false;
for (int d = 0; d < param.n_device; d++) {
has_split |= this->has_split[d];
}
if (!has_split) {
LOG(INFO) << "no splittable nodes, stop";
break;
}
}
ins2node_id_all_reduce(level);
}
}
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
this->trees[device_id].prune_self(param.gamma);
});
predict_in_training(k);
tree.nodes.resize(this->trees.front().nodes.size());
tree.nodes.copy_from(this->trees.front().nodes);
}
return trees;
}
|
329bee43bd570cb9d0b90937407726a7704ed954.cu
|
//
// Created by jiashuai on 19-1-23.
//
#include <thundergbm/builder/tree_builder.h>
#include "thundergbm/util/multi_device.h"
#include "thundergbm/util/device_lambda.cuh"
void TreeBuilder::update_tree() {
TIMED_FUNC(timerObj);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
auto& sp = this->sp[device_id];
auto& tree = this->trees[device_id];
auto sp_data = sp.device_data();
LOG(DEBUG) << sp;
int n_nodes_in_level = sp.size();
Tree::TreeNode *nodes_data = tree.nodes.device_data();
float_type rt_eps = param.rt_eps;
float_type lambda = param.lambda;
device_loop(n_nodes_in_level, [=]__device__(int i) {
float_type best_split_gain = sp_data[i].gain;
if (best_split_gain > rt_eps) {
//do split
if (sp_data[i].nid == -1) return;
int nid = sp_data[i].nid;
Tree::TreeNode &node = nodes_data[nid];
node.gain = best_split_gain;
Tree::TreeNode &lch = nodes_data[node.lch_index];//left child
Tree::TreeNode &rch = nodes_data[node.rch_index];//right child
lch.is_valid = true;
rch.is_valid = true;
node.split_feature_id = sp_data[i].split_fea_id;
GHPair p_missing_gh = sp_data[i].fea_missing_gh;
//todo process begin
node.split_value = sp_data[i].fval;
node.split_bid = sp_data[i].split_bid;
rch.sum_gh_pair = sp_data[i].rch_sum_gh;
if (sp_data[i].default_right) {
rch.sum_gh_pair = rch.sum_gh_pair + p_missing_gh;
node.default_right = true;
}
lch.sum_gh_pair = node.sum_gh_pair - rch.sum_gh_pair;
lch.calc_weight(lambda);
rch.calc_weight(lambda);
} else {
//set leaf
if (sp_data[i].nid == -1) return;
int nid = sp_data[i].nid;
Tree::TreeNode &node = nodes_data[nid];
node.is_leaf = true;
nodes_data[node.lch_index].is_valid = false;
nodes_data[node.rch_index].is_valid = false;
}
});
LOG(DEBUG) << tree.nodes;
});
}
void TreeBuilder::predict_in_training(int k) {
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
auto y_predict_data = y_predict[device_id].device_data() + k * n_instances;
auto nid_data = ins2node_id[device_id].device_data();
const Tree::TreeNode *nodes_data = trees[device_id].nodes.device_data();
auto lr = param.learning_rate;
device_loop(n_instances, [=]__device__(int i) {
int nid = nid_data[i];
while (nid != -1 && (nodes_data[nid].is_pruned)) nid = nodes_data[nid].parent_index;
y_predict_data[i] += lr * nodes_data[nid].base_weight;
});
});
}
void TreeBuilder::init(const DataSet &dataset, const GBMParam ¶m) {
int n_available_device;
cudaGetDeviceCount(&n_available_device);
CHECK_GE(n_available_device, param.n_device) << "only " << n_available_device
<< " GPUs available; please set correct number of GPUs to use";
FunctionBuilder::init(dataset, param);
this->n_instances = dataset.n_instances();
trees = vector<Tree>(param.n_device);
ins2node_id = MSyncArray<int>(param.n_device, n_instances);
sp = MSyncArray<SplitPoint>(param.n_device);
has_split = vector<bool>(param.n_device);
int n_outputs = param.num_class * n_instances;
y_predict = MSyncArray<float_type>(param.n_device, n_outputs);
gradients = MSyncArray<GHPair>(param.n_device, n_instances);
}
void TreeBuilder::ins2node_id_all_reduce(int depth) {
//get global ins2node id
{
SyncArray<int> local_ins2node_id(n_instances);
auto local_ins2node_id_data = local_ins2node_id.device_data();
auto global_ins2node_id_data = ins2node_id.front().device_data();
for (int d = 1; d < param.n_device; d++) {
local_ins2node_id.copy_from(ins2node_id[d]);
device_loop(n_instances, [=]__device__(int i) {
global_ins2node_id_data[i] = (global_ins2node_id_data[i] > local_ins2node_id_data[i]) ?
global_ins2node_id_data[i] : local_ins2node_id_data[i];
});
}
}
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
ins2node_id[device_id].copy_from(ins2node_id.front());
});
}
void TreeBuilder::split_point_all_reduce(int depth) {
TIMED_FUNC(timerObj);
//get global best split of each node
int n_nodes_in_level = 1 << depth;//2^i
int nid_offset = (1 << depth) - 1;//2^i - 1
auto global_sp_data = sp.front().host_data();
vector<bool> active_sp(n_nodes_in_level);
for (int device_id = 0; device_id < param.n_device; device_id++) {
auto local_sp_data = sp[device_id].host_data();
for (int j = 0; j < sp[device_id].size(); j++) {
int sp_nid = local_sp_data[j].nid;
if (sp_nid == -1) continue;
int global_pos = sp_nid - nid_offset;
if (!active_sp[global_pos])
global_sp_data[global_pos] = local_sp_data[j];
else
global_sp_data[global_pos] = (global_sp_data[global_pos].gain >= local_sp_data[j].gain)
?
global_sp_data[global_pos] : local_sp_data[j];
active_sp[global_pos] = true;
}
}
//set inactive sp
for (int n = 0; n < n_nodes_in_level; n++) {
if (!active_sp[n])
global_sp_data[n].nid = -1;
}
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
sp[device_id].copy_from(sp.front());
});
LOG(DEBUG) << "global best split point = " << sp.front();
}
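// Editor's illustrative sketch (not part of the original ThunderGBM source): the
// reduction above relies on level-order numbering of a complete binary tree, where
// level `depth` starts at nid_offset = 2^depth - 1 and holds 2^depth nodes, so a
// split point with node id sp_nid lands at slot sp_nid - nid_offset. The helper name
// below is an assumption; it only restates that mapping and is not called anywhere.
static inline int level_order_nid(int depth, int position_in_level) {
  // nodes above this level: 2^depth - 1; add the position within the level
  return ((1 << depth) - 1) + position_in_level;
}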
vector<Tree> TreeBuilder::build_approximate(const MSyncArray<GHPair> &gradients) {
vector<Tree> trees(param.tree_per_rounds);
TIMED_FUNC(timerObj);
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
this->shards[device_id].column_sampling(param.column_sampling_rate);
});
for (int k = 0; k < param.tree_per_rounds; ++k) {
Tree &tree = trees[k];
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
this->ins2node_id[device_id].resize(n_instances);
this->gradients[device_id].set_device_data(const_cast<GHPair *>(gradients[device_id].device_data() + k * n_instances));
this->trees[device_id].init2(this->gradients[device_id], param);
});
for (int level = 0; level < param.depth; ++level) {
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
find_split(level, device_id);
});
split_point_all_reduce(level);
{
TIMED_SCOPE(timerObj, "apply sp");
update_tree();
update_ins2node_id();
{
LOG(TRACE) << "gathering ins2node id";
//get final result of the reset instance id to node id
bool has_split = false;
for (int d = 0; d < param.n_device; d++) {
has_split |= this->has_split[d];
}
if (!has_split) {
LOG(INFO) << "no splittable nodes, stop";
break;
}
}
ins2node_id_all_reduce(level);
}
}
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
this->trees[device_id].prune_self(param.gamma);
});
predict_in_training(k);
tree.nodes.resize(this->trees.front().nodes.size());
tree.nodes.copy_from(this->trees.front().nodes);
}
return trees;
}
|
cb41031518342e7b4f1f4504be065525df64f77c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "svmClassify.h"
#include "svmClassifyKernels.h"
namespace gpusvm {
/************
* This file contains the CUDA functions necessary for SVM Classification
*/
/**
* This function computes self dot products (Euclidean norm squared) for every vector in an array
* @param devSource the vectors, in column major format
 * @param devSourcePitchInFloats the pitch of each row of the vectors (this is guaranteed to be >= sourceCount; it might be greater due to padding, to keep each row of the source vectors aligned)
* @param devDest a vector which will receive the self dot product
* @param sourceCount the number of vectors
* @param sourceLength the dimensionality of each vector
*/
__global__ void makeSelfDots(float* devSource, int devSourcePitchInFloats, float* devDest, int sourceCount, int sourceLength) {
float dot = 0;
int index = GPUSVM_BLOCKSIZE * blockIdx.x + threadIdx.x;
if (index < sourceCount) {
for (int i = 0; i < sourceLength; i++) {
float currentElement = *(devSource + GPUSVM_IMUL(devSourcePitchInFloats, i) + index);
dot = dot + currentElement * currentElement;
}
*(devDest + index) = dot;
}
}
/**
* This function constructs a matrix devDots, where devDots_(i,j) = ||data_i||^2 + ||SV_j||^2
* @param devDots the output array
* @param devDotsPitchInFloats the pitch of each row of devDots. Guaranteed to be >= nSV
* @param devSVDots a vector containing ||SV_j||^2 for all j in [0, nSV - 1]
* @param devDataDots a vector containing ||data_i||^2 for all i in [0, nPoints - 1]
* @param nSV the number of Support Vectors in the classifier
*/
__global__ void makeDots(float* devDots, int devDotsPitchInFloats, float* devSVDots, float* devDataDots, int nSV, int nPoints) {
__shared__ float localSVDots[GPUSVM_BLOCKSIZE];
__shared__ float localDataDots[GPUSVM_BLOCKSIZE];
int svIndex = GPUSVM_IMUL(GPUSVM_BLOCKSIZE, blockIdx.x) + threadIdx.x;
if (svIndex < nSV) {
localSVDots[threadIdx.x] = *(devSVDots + svIndex);
}
int dataIndex = GPUSVM_BLOCKSIZE * blockIdx.y + threadIdx.x;
if (dataIndex < nPoints) {
localDataDots[threadIdx.x] = *(devDataDots + dataIndex);
}
__syncthreads();
dataIndex = GPUSVM_BLOCKSIZE * blockIdx.y;
for(int i = 0; i < GPUSVM_BLOCKSIZE; i++, dataIndex++) {
if ((svIndex < nSV) && (dataIndex < nPoints)) {
*(devDots + GPUSVM_IMUL(devDotsPitchInFloats, dataIndex) + svIndex) = localSVDots[threadIdx.x] + localDataDots[i];
}
}
}
__device__ void computeKernels(float* devNorms, int devNormsPitchInFloats, float* devAlphas, int nPoints, int nSV, int kernelType, float coef0, int degree, float* localValue, int svIndex) {
if (svIndex < nSV) {
float alpha = devAlphas[svIndex];
float norm = devNorms[GPUSVM_IMUL(devNormsPitchInFloats, blockIdx.y) + svIndex];
if(kernelType == GPUSVM_RBF)
{
localValue[threadIdx.x] = alpha * exp(norm);
}
else if(kernelType == GPUSVM_LINEAR)
{
localValue[threadIdx.x] = alpha * norm;
}
else if(kernelType == GPUSVM_POLYNOMIAL)
{
localValue[threadIdx.x] = alpha * pow(norm + coef0, degree);
}
else if(kernelType == GPUSVM_SIGMOID)
{
localValue[threadIdx.x] = alpha * tanh(norm + coef0);
}
}
}
/**
* This function completes the kernel evaluations and begins the reductions to form the classification result.
* @param devNorms this contains partially completed kernel evaluations. For most kernels, devNorms_(i, j) = data_i (dot) sv_j. For the RBF kernel, devNorms_(i, j) = -gamma*(||data_i||^2 + ||sv_j||^2 - 2* data_i (dot) sv_j)
* @param devNormsPitchInFloats contains the pitch of the partially completed kernel evaluations. It will always be >= nSV.
* @param devAlphas this is the alpha vector for the SVM classifier
* @param nPoints the number of data points
* @param nSV the number of support vectors
* @param kernelType the type of kernel
* @param coef0 a coefficient used in the polynomial & sigmoid kernels
* @param degree the degree used in the polynomial kernel
* @param devLocalValue the local classification results
* @param reduceOffset computed to begin the reduction properly
*/
__global__ void computeKernelsReduce(float* devNorms, int devNormsPitchInFloats, float* devAlphas, int nPoints, int nSV, int kernelType, float coef0, int degree, float* devLocalValue, int reduceOffset) {
/*Dynamic shared memory setup*/
extern __shared__ float localValue[];
int svIndex = blockDim.x * blockIdx.x + threadIdx.x;
computeKernels(devNorms, devNormsPitchInFloats, devAlphas, nPoints, nSV, kernelType, coef0, degree, localValue, svIndex);
__syncthreads();
/*reduction*/
for(int offset = reduceOffset; offset >= 1; offset = offset >> 1) {
if ((threadIdx.x < offset) && (svIndex + offset < nSV)) {
int compOffset = threadIdx.x + offset;
localValue[threadIdx.x] = localValue[threadIdx.x] + localValue[compOffset];
}
__syncthreads();
}
if (threadIdx.x == 0) {
devLocalValue[blockIdx.x + gridDim.x*blockIdx.y] = localValue[0];
}
}
/*Second stage reduce and cleanup function*/
__global__ void doClassification(float* devResult, float b, float* devLocalValue, int reduceOffset, int nPoints) {
extern __shared__ float localValue[];
localValue[threadIdx.x] = devLocalValue[blockDim.x*blockIdx.y + threadIdx.x];
__syncthreads();
for(int offset = reduceOffset; offset >= 1; offset = offset >> 1) {
if (threadIdx.x < offset) {
int compOffset = threadIdx.x + offset;
if (compOffset < blockDim.x) {
localValue[threadIdx.x] = localValue[threadIdx.x] + localValue[compOffset];
}
}
__syncthreads();
}
float sumResult = localValue[0];
if (threadIdx.x == 0) {
sumResult += b;
devResult[blockIdx.y] = sumResult;
}
}
}
|
cb41031518342e7b4f1f4504be065525df64f77c.cu
|
#include "svmClassify.h"
#include "svmClassifyKernels.h"
namespace gpusvm {
/************
* This file contains the CUDA functions necessary for SVM Classification
*/
/**
* This function computes self dot products (Euclidean norm squared) for every vector in an array
* @param devSource the vectors, in column major format
 * @param devSourcePitchInFloats the pitch of each row of the vectors (this is guaranteed to be >= sourceCount; it might be greater due to padding, to keep each row of the source vectors aligned)
* @param devDest a vector which will receive the self dot product
* @param sourceCount the number of vectors
* @param sourceLength the dimensionality of each vector
*/
__global__ void makeSelfDots(float* devSource, int devSourcePitchInFloats, float* devDest, int sourceCount, int sourceLength) {
float dot = 0;
int index = GPUSVM_BLOCKSIZE * blockIdx.x + threadIdx.x;
if (index < sourceCount) {
for (int i = 0; i < sourceLength; i++) {
float currentElement = *(devSource + GPUSVM_IMUL(devSourcePitchInFloats, i) + index);
dot = dot + currentElement * currentElement;
}
*(devDest + index) = dot;
}
}
/**
* This function constructs a matrix devDots, where devDots_(i,j) = ||data_i||^2 + ||SV_j||^2
* @param devDots the output array
* @param devDotsPitchInFloats the pitch of each row of devDots. Guaranteed to be >= nSV
* @param devSVDots a vector containing ||SV_j||^2 for all j in [0, nSV - 1]
* @param devDataDots a vector containing ||data_i||^2 for all i in [0, nPoints - 1]
* @param nSV the number of Support Vectors in the classifier
*/
__global__ void makeDots(float* devDots, int devDotsPitchInFloats, float* devSVDots, float* devDataDots, int nSV, int nPoints) {
__shared__ float localSVDots[GPUSVM_BLOCKSIZE];
__shared__ float localDataDots[GPUSVM_BLOCKSIZE];
int svIndex = GPUSVM_IMUL(GPUSVM_BLOCKSIZE, blockIdx.x) + threadIdx.x;
if (svIndex < nSV) {
localSVDots[threadIdx.x] = *(devSVDots + svIndex);
}
int dataIndex = GPUSVM_BLOCKSIZE * blockIdx.y + threadIdx.x;
if (dataIndex < nPoints) {
localDataDots[threadIdx.x] = *(devDataDots + dataIndex);
}
__syncthreads();
dataIndex = GPUSVM_BLOCKSIZE * blockIdx.y;
for(int i = 0; i < GPUSVM_BLOCKSIZE; i++, dataIndex++) {
if ((svIndex < nSV) && (dataIndex < nPoints)) {
*(devDots + GPUSVM_IMUL(devDotsPitchInFloats, dataIndex) + svIndex) = localSVDots[threadIdx.x] + localDataDots[i];
}
}
}
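/* Editor's illustrative sketch (not part of the original gpusvm source): how one entry
   of the partially completed RBF kernel argument (see the computeKernelsReduce
   documentation below) is assembled from the self dot products produced by
   makeSelfDots/makeDots above plus the cross dot product data_i (dot) sv_j, which is
   typically obtained from a matrix multiply of the data against the support vectors.
   The function name and parameters are assumptions. */
static inline float rbfNormEntry(float crossDot, float dataDot, float svDot, float gamma) {
  // devNorms_(i, j) = -gamma * (||data_i||^2 + ||sv_j||^2 - 2 * data_i (dot) sv_j);
  // computeKernels later applies exp() to this value and scales it by alpha_j.
  return -gamma * (dataDot + svDot - 2.0f * crossDot);
}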
__device__ void computeKernels(float* devNorms, int devNormsPitchInFloats, float* devAlphas, int nPoints, int nSV, int kernelType, float coef0, int degree, float* localValue, int svIndex) {
if (svIndex < nSV) {
float alpha = devAlphas[svIndex];
float norm = devNorms[GPUSVM_IMUL(devNormsPitchInFloats, blockIdx.y) + svIndex];
if(kernelType == GPUSVM_RBF)
{
localValue[threadIdx.x] = alpha * exp(norm);
}
else if(kernelType == GPUSVM_LINEAR)
{
localValue[threadIdx.x] = alpha * norm;
}
else if(kernelType == GPUSVM_POLYNOMIAL)
{
localValue[threadIdx.x] = alpha * pow(norm + coef0, degree);
}
else if(kernelType == GPUSVM_SIGMOID)
{
localValue[threadIdx.x] = alpha * tanh(norm + coef0);
}
}
}
/**
* This function completes the kernel evaluations and begins the reductions to form the classification result.
* @param devNorms this contains partially completed kernel evaluations. For most kernels, devNorms_(i, j) = data_i (dot) sv_j. For the RBF kernel, devNorms_(i, j) = -gamma*(||data_i||^2 + ||sv_j||^2 - 2* data_i (dot) sv_j)
* @param devNormsPitchInFloats contains the pitch of the partially completed kernel evaluations. It will always be >= nSV.
* @param devAlphas this is the alpha vector for the SVM classifier
* @param nPoints the number of data points
* @param nSV the number of support vectors
* @param kernelType the type of kernel
* @param coef0 a coefficient used in the polynomial & sigmoid kernels
* @param degree the degree used in the polynomial kernel
* @param devLocalValue the local classification results
* @param reduceOffset computed to begin the reduction properly
*/
__global__ void computeKernelsReduce(float* devNorms, int devNormsPitchInFloats, float* devAlphas, int nPoints, int nSV, int kernelType, float coef0, int degree, float* devLocalValue, int reduceOffset) {
/*Dynamic shared memory setup*/
extern __shared__ float localValue[];
int svIndex = blockDim.x * blockIdx.x + threadIdx.x;
computeKernels(devNorms, devNormsPitchInFloats, devAlphas, nPoints, nSV, kernelType, coef0, degree, localValue, svIndex);
__syncthreads();
/*reduction*/
for(int offset = reduceOffset; offset >= 1; offset = offset >> 1) {
if ((threadIdx.x < offset) && (svIndex + offset < nSV)) {
int compOffset = threadIdx.x + offset;
localValue[threadIdx.x] = localValue[threadIdx.x] + localValue[compOffset];
}
__syncthreads();
}
if (threadIdx.x == 0) {
devLocalValue[blockIdx.x + gridDim.x*blockIdx.y] = localValue[0];
}
}
/*Second stage reduce and cleanup function*/
__global__ void doClassification(float* devResult, float b, float* devLocalValue, int reduceOffset, int nPoints) {
extern __shared__ float localValue[];
localValue[threadIdx.x] = devLocalValue[blockDim.x*blockIdx.y + threadIdx.x];
__syncthreads();
for(int offset = reduceOffset; offset >= 1; offset = offset >> 1) {
if (threadIdx.x < offset) {
int compOffset = threadIdx.x + offset;
if (compOffset < blockDim.x) {
localValue[threadIdx.x] = localValue[threadIdx.x] + localValue[compOffset];
}
}
__syncthreads();
}
float sumResult = localValue[0];
if (threadIdx.x == 0) {
sumResult += b;
devResult[blockIdx.y] = sumResult;
}
}
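/* Editor's illustrative sketch (not part of the original gpusvm source): a minimal
   host-side driver for the two-stage reduction above. It assumes devNorms, devAlphas
   and devResult are already allocated and filled, that nBlocksX = ceil(nSV / block width)
   fits in one block for the second stage (nBlocksX <= 1024), and that reduceOffset is
   seeded with the largest power of two below the reduction width, which is how such
   reductions are usually started. All names and launch parameters here are assumptions,
   not the library's actual configuration. */
static void exampleClassifyLaunch(float* devNorms, int devNormsPitchInFloats,
                                  float* devAlphas, float* devResult, float b,
                                  int nPoints, int nSV, int kernelType,
                                  float coef0, int degree) {
  const int threadsPerBlock = 256;
  const int nBlocksX = (nSV + threadsPerBlock - 1) / threadsPerBlock;
  int reduceOffset = 1;
  while (reduceOffset * 2 < threadsPerBlock) reduceOffset *= 2;  // 128 for 256 threads
  float* devLocalValue;
  cudaMalloc(&devLocalValue, sizeof(float) * nBlocksX * nPoints);
  dim3 grid1(nBlocksX, nPoints);
  computeKernelsReduce<<<grid1, threadsPerBlock, threadsPerBlock * sizeof(float)>>>(
      devNorms, devNormsPitchInFloats, devAlphas, nPoints, nSV,
      kernelType, coef0, degree, devLocalValue, reduceOffset);
  int reduceOffset2 = 1;
  while (reduceOffset2 * 2 < nBlocksX) reduceOffset2 *= 2;
  dim3 grid2(1, nPoints);
  doClassification<<<grid2, nBlocksX, nBlocksX * sizeof(float)>>>(
      devResult, b, devLocalValue, reduceOffset2, nPoints);
  cudaFree(devLocalValue);
}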
}
|
a3d5d403487199f01912a7df41189542c39c9a6c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "call_kALACSearch.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int16_t *mCoefsU = NULL;
hipMalloc(&mCoefsU, XSIZE*YSIZE);
int16_t *mCoefsV = NULL;
hipMalloc(&mCoefsV, XSIZE*YSIZE);
int32_t kALACMaxCoefs = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
call_kALACSearch), dim3(gridBlock),dim3(threadBlock), 0, 0, mCoefsU,mCoefsV,kALACMaxCoefs);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
call_kALACSearch), dim3(gridBlock),dim3(threadBlock), 0, 0, mCoefsU,mCoefsV,kALACMaxCoefs);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
call_kALACSearch), dim3(gridBlock),dim3(threadBlock), 0, 0, mCoefsU,mCoefsV,kALACMaxCoefs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a3d5d403487199f01912a7df41189542c39c9a6c.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "call_kALACSearch.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int16_t *mCoefsU = NULL;
cudaMalloc(&mCoefsU, XSIZE*YSIZE);
int16_t *mCoefsV = NULL;
cudaMalloc(&mCoefsV, XSIZE*YSIZE);
int32_t kALACMaxCoefs = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
call_kALACSearch<<<gridBlock,threadBlock>>>(mCoefsU,mCoefsV,kALACMaxCoefs);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
call_kALACSearch<<<gridBlock,threadBlock>>>(mCoefsU,mCoefsV,kALACMaxCoefs);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
call_kALACSearch<<<gridBlock,threadBlock>>>(mCoefsU,mCoefsV,kALACMaxCoefs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
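// Editor's note (not part of the generated benchmark): the while-loops above simply
// round XSIZE/YSIZE up to multiples of BLOCKX/BLOCKY before dividing. The usual
// one-line equivalent is ceil division, sketched here with an assumed helper name;
// nothing calls it.
static inline dim3 make_grid(int xsize, int ysize, int blockx, int blocky) {
  return dim3((xsize + blockx - 1) / blockx, (ysize + blocky - 1) / blocky);
}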
|
7f41867c30fb80d3e72a87d78473285bd462fbb2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void kRejectMinimizationStep_kernel( int numAtoms, float4 *posq, float4 *oldPosq ) {
for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) {
posq[atom] = oldPosq[atom];
}
}
extern "C" __global__ void kAcceptMinimizationStep_kernel( int numAtoms, float4 *posq, float4 *oldPosq ) {
for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) {
oldPosq[atom] = posq[atom];
}
}
|
7f41867c30fb80d3e72a87d78473285bd462fbb2.cu
|
extern "C" __global__ void kRejectMinimizationStep_kernel( int numAtoms, float4 *posq, float4 *oldPosq ) {
for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) {
posq[atom] = oldPosq[atom];
}
}
extern "C" __global__ void kAcceptMinimizationStep_kernel( int numAtoms, float4 *posq, float4 *oldPosq ) {
for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) {
oldPosq[atom] = posq[atom];
}
}
|
5cfc6c28418c63bd4f974a141d4d9ab83d8b63f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Starting file for the cuda version.
You do not have to do the same multiple-level blocking for this to work
*/
#include<stdio.h>
const char* dgemm_desc = "CUDA dgemm";
#if !defined(BLOCK_SIZE)
#define BLOCK_SIZE 512
#endif
#if !defined(CUDA_BLOCK_SIZE)
#define CUDA_BLOCK_SIZE 16
#endif
#if !defined(CUDA_GRID_SIZE)
#define CUDA_GRID_SIZE 32
#endif
#define min(a,b) (((a)<(b))?(a):(b))
// Perform smaller dgemm operation using CUDA
__global__ void do_block(int lda, double*matA, double* matB, double* matC, int M, int N, int K) {
int row = threadIdx.y + blockIdx.y*CUDA_BLOCK_SIZE;
int col = threadIdx.x + blockIdx.x*CUDA_BLOCK_SIZE;
double c;
//double a, b, c;
//printf("blockIdx.x: %i | blockIdx.y: %i | gridDim.x: %i | gridDim.y: %i\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y);
if (row < M && col < N) {
//printf("row: %i | col: %i\n", row, col);
c = matC[row + col*lda];
for (int k = 0; k < K; ++k) {
//a = matA[k*lda + row];
//b = matB[col*lda + k];
//c += a * b;
c += matA[k*lda + row] * matB[col*lda + k];
}
matC[row + col*lda] = c;
}
}
__global__ void printDeviceMatrix(double* mat, int lda) {
for (int i = 0; i < lda; ++i) {
for (int j = 0; j < lda; ++j) {
if (mat[i*lda + j] > 0) printf(" ");
printf("%.0f ", mat[i*lda + j]);
}
printf("\n");
}
}
/* This routine performs a dgemm operation
* C := C + A * B
* where A, B, and C are lda-by-lda matrices stored in column-major format.
* On exit, A and B maintain their input values. */
void square_dgemm (int lda, double* A, double* B, double* C) {
// transfer the matrices over, allocate size for C
size_t matSize = lda * lda * sizeof(double);
double* matA;
double* matB;
double* matC;
hipMalloc(&matA, matSize);
hipMalloc(&matB, matSize);
hipMalloc(&matC, matSize);
hipMemcpy(matA, A, matSize, hipMemcpyHostToDevice);
hipMemcpy(matB, B, matSize, hipMemcpyHostToDevice);
/* For each block-row of A */
for (int i = 0; i < lda; i += BLOCK_SIZE) {
/* For each block-column of B */
for (int j = 0; j < lda; j += BLOCK_SIZE) {
/* Accumulate block dgemms into block of C */
for (int k = 0; k < lda; k += BLOCK_SIZE) {
/* Correct block dimensions if block "goes off edge of" the matrix */
int M = min(BLOCK_SIZE, lda-i);
int N = min(BLOCK_SIZE, lda-j);
int K = min(BLOCK_SIZE, lda-k);
//int gridSize = BLOCK_SIZE / CUDA_BLOCK_SIZE;
//dim3 gridDim(gridSize, gridSize);
dim3 gridDim(CUDA_GRID_SIZE, CUDA_GRID_SIZE);
dim3 blockDim(CUDA_BLOCK_SIZE, CUDA_BLOCK_SIZE);
/* Perform individual block dgemm */
// printf("i: %i; j: %i; k: %i\n", i, j, k);
hipLaunchKernelGGL(( do_block), dim3(gridDim), dim3(blockDim), 0, 0, lda, matA + k*lda + i, matB + j*lda + k, matC + i + j*lda, M, N, K);
}
}
}
hipDeviceSynchronize();
hipMemcpy(C, matC, matSize, hipMemcpyDeviceToHost);
//printDeviceMatrix<<<1, 1>>>(matC, lda);
/* for (int i = 0; i < lda; ++i) { */
/* for (int j = 0; j < lda; ++j) { */
/* printf("%.2f ", C[i*lda + j]); */
/* } */
/* printf("\n"); */
/* } */
}
|
5cfc6c28418c63bd4f974a141d4d9ab83d8b63f3.cu
|
/*
Starting file for the cuda version.
You do not have to do the same multiple-level blocking for this to work
*/
#include<stdio.h>
const char* dgemm_desc = "CUDA dgemm";
#if !defined(BLOCK_SIZE)
#define BLOCK_SIZE 512
#endif
#if !defined(CUDA_BLOCK_SIZE)
#define CUDA_BLOCK_SIZE 16
#endif
#if !defined(CUDA_GRID_SIZE)
#define CUDA_GRID_SIZE 32
#endif
#define min(a,b) (((a)<(b))?(a):(b))
// Perform smaller dgemm operation using CUDA
__global__ void do_block(int lda, double*matA, double* matB, double* matC, int M, int N, int K) {
int row = threadIdx.y + blockIdx.y*CUDA_BLOCK_SIZE;
int col = threadIdx.x + blockIdx.x*CUDA_BLOCK_SIZE;
double c;
//double a, b, c;
//printf("blockIdx.x: %i | blockIdx.y: %i | gridDim.x: %i | gridDim.y: %i\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y);
if (row < M && col < N) {
//printf("row: %i | col: %i\n", row, col);
c = matC[row + col*lda];
for (int k = 0; k < K; ++k) {
//a = matA[k*lda + row];
//b = matB[col*lda + k];
//c += a * b;
c += matA[k*lda + row] * matB[col*lda + k];
}
matC[row + col*lda] = c;
}
}
__global__ void printDeviceMatrix(double* mat, int lda) {
for (int i = 0; i < lda; ++i) {
for (int j = 0; j < lda; ++j) {
if (mat[i*lda + j] > 0) printf(" ");
printf("%.0f ", mat[i*lda + j]);
}
printf("\n");
}
}
/* This routine performs a dgemm operation
* C := C + A * B
* where A, B, and C are lda-by-lda matrices stored in column-major format.
* On exit, A and B maintain their input values. */
void square_dgemm (int lda, double* A, double* B, double* C) {
// transfer the matrices over, allocate size for C
size_t matSize = lda * lda * sizeof(double);
double* matA;
double* matB;
double* matC;
cudaMalloc(&matA, matSize);
cudaMalloc(&matB, matSize);
cudaMalloc(&matC, matSize);
cudaMemcpy(matA, A, matSize, cudaMemcpyHostToDevice);
cudaMemcpy(matB, B, matSize, cudaMemcpyHostToDevice);
/* For each block-row of A */
for (int i = 0; i < lda; i += BLOCK_SIZE) {
/* For each block-column of B */
for (int j = 0; j < lda; j += BLOCK_SIZE) {
/* Accumulate block dgemms into block of C */
for (int k = 0; k < lda; k += BLOCK_SIZE) {
/* Correct block dimensions if block "goes off edge of" the matrix */
int M = min(BLOCK_SIZE, lda-i);
int N = min(BLOCK_SIZE, lda-j);
int K = min(BLOCK_SIZE, lda-k);
//int gridSize = BLOCK_SIZE / CUDA_BLOCK_SIZE;
//dim3 gridDim(gridSize, gridSize);
dim3 gridDim(CUDA_GRID_SIZE, CUDA_GRID_SIZE);
dim3 blockDim(CUDA_BLOCK_SIZE, CUDA_BLOCK_SIZE);
/* Perform individual block dgemm */
// printf("i: %i; j: %i; k: %i\n", i, j, k);
do_block<<<gridDim, blockDim>>>(lda, matA + k*lda + i, matB + j*lda + k, matC + i + j*lda, M, N, K);
}
}
}
cudaDeviceSynchronize();
cudaMemcpy(C, matC, matSize, cudaMemcpyDeviceToHost);
//printDeviceMatrix<<<1, 1>>>(matC, lda);
/* for (int i = 0; i < lda; ++i) { */
/* for (int j = 0; j < lda; ++j) { */
/* printf("%.2f ", C[i*lda + j]); */
/* } */
/* printf("\n"); */
/* } */
}
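/* Editor's illustrative sketch (not part of the original assignment code): a small
   host-side check that square_dgemm really computes C := C + A * B for column-major
   matrices, compared against a naive triple loop. The function name and tolerance are
   assumptions, and nothing calls it, so it does not interfere with the benchmark driver. */
static bool example_check_square_dgemm(int lda) {
  double* A = new double[lda * lda];
  double* B = new double[lda * lda];
  double* C = new double[lda * lda];
  double* Cref = new double[lda * lda];
  for (int i = 0; i < lda * lda; ++i) {
    A[i] = (double)(i % 7); B[i] = (double)(i % 5); C[i] = 1.0; Cref[i] = 1.0;
  }
  /* naive reference, column-major: Cref(i,j) += sum_k A(i,k) * B(k,j) */
  for (int j = 0; j < lda; ++j)
    for (int k = 0; k < lda; ++k)
      for (int i = 0; i < lda; ++i)
        Cref[i + j*lda] += A[i + k*lda] * B[k + j*lda];
  square_dgemm(lda, A, B, C);
  bool ok = true;
  for (int i = 0; i < lda * lda; ++i) {
    double d = C[i] - Cref[i];
    if (d < 0) d = -d;
    if (d > 1e-9) { ok = false; break; }
  }
  delete[] A; delete[] B; delete[] C; delete[] Cref;
  return ok;
}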
|
bee3b39c652bb657eb3a71e0f7800bdca040d099.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "do_sum_merge.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *datas = NULL;
hipMalloc(&datas, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
do_sum_merge), dim3(gridBlock),dim3(threadBlock), 0, 0, datas,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
do_sum_merge), dim3(gridBlock),dim3(threadBlock), 0, 0, datas,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
do_sum_merge), dim3(gridBlock),dim3(threadBlock), 0, 0, datas,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
bee3b39c652bb657eb3a71e0f7800bdca040d099.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "do_sum_merge.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *datas = NULL;
cudaMalloc(&datas, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
do_sum_merge<<<gridBlock,threadBlock>>>(datas,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
do_sum_merge<<<gridBlock,threadBlock>>>(datas,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
do_sum_merge<<<gridBlock,threadBlock>>>(datas,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
c0580ac6cc266409fc1d58d10ecc67a63b00ed00.hip
|
// !!! This is a file automatically generated by hipify!!!
/* created by Suva Shahria*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <iostream>
#include <string>
#include <fstream>
#include <queue>
#include <time.h>
#include <cstring>
#include <cstdlib>
#include <iostream>
#include <time.h>
using namespace std;
/*
Given a vertex, a different thread checks each other vertex to see whether there is an edge connecting them.
*/
__global__
void cuda_bfs(int v, int idx, int * dmat, bool * d_visited, int * d_push) {
int index = idx * v;
if (threadIdx.x < v) {
if (dmat[index + threadIdx.x] && !d_visited[threadIdx.x]) {
//printf("%d-----%d\n", index + threadIdx.x, threadIdx.x);
// printf("%d\n", threadIdx.x);
d_visited[threadIdx.x] = true;
d_push[threadIdx.x] = 1;
}
}
/*if (threadIdx.x < v) {
if (visited[threadIdx.x] == true) {
printf("%d\n", threadIdx.x);
}
}
/*if (threadIdx.x < v*v) {
if (dmat[threadIdx.x] == 1) {
printf("%d\n", threadIdx.x);
}
}
*/
}
int main(int arg, char** argv) {
int* mat;
int i;
int v;
int single; // fgetc() returns int; EOF must be compared as an int, not a char
if (arg != 3)
{
printf("usage: ./out size, starting_index example: ./t 5 2 \n");
return -1;
}
v = atoi(argv[1]);
int start = atoi(argv[2]);
if (start >= v || start<0) {
printf("start index is out of bounds \n");
return -1;
}
FILE *pToFile = fopen("graph.txt", "r");
i = 0;
//
mat = (int*)malloc(v *v * sizeof(int));
//read the adjacency matrix from graph.txt
while ((single = fgetc(pToFile)) != EOF) {
if (single != '\n') {
if (single == '1') {
// cout << i << endl;
mat[i] = 1;
//cout << mat[i] << endl;
}
else {
mat[i] = 0;
}
i++;
}
}
fclose(pToFile);
int * dmat;
// create device objects
hipMalloc((void**)&dmat, sizeof(int) * v*v);
hipMemcpy((void*)dmat, (void*)mat, sizeof(int)*v*v, hipMemcpyHostToDevice);
bool* visited = (bool*)malloc(v * sizeof(bool)); //visited
bool* d_visited;
for (int i = 0; i < v; i++) {
visited[i] = false;
}
visited[start] = true;
hipMalloc((void**)&d_visited, sizeof(bool) * v);
hipMemcpy((void*)d_visited, (void*)visited, sizeof(bool)*v, hipMemcpyHostToDevice);
queue<int> q; //queue
q.push(start);
int* h_push = (int*)malloc(v * sizeof(int)); //h push
int* d_push;
for (i = 0; i < v; i++) {
h_push[i] = 0;
}
hipMalloc((void**)&d_push, sizeof(int) * v);
hipMemcpy((void*)d_push, (void*)h_push, sizeof(int)*v, hipMemcpyHostToDevice);
hipEvent_t st1, stop;
hipEventCreate(&st1);
hipEventCreate(&stop);
float milliseconds = 0;
int j = 0;
hipEventRecord(st1);
// once vertex is found to be a neighbor adds it to end
while (!q.empty()) {
for (i = 0; i < v; i++) {
h_push[i] = 0;
}
hipMemcpy((void*)d_push, (void*)h_push, sizeof(int)*v, hipMemcpyHostToDevice);
// hipMemcpy((void*)d_visited, (void*)visited, sizeof(bool)*v, hipMemcpyHostToDevice);
i = q.front();
cout << q.front() << " ";
q.pop();
cuda_bfs << <1, v >> >(v, i, dmat, d_visited, d_push);
hipMemcpy((void*)h_push, (void*)d_push, sizeof(int) * v, hipMemcpyDeviceToHost);
// hipMemcpy((void*)d_visited, (void*)visited, sizeof(bool) * v, hipMemcpyDeviceToHost);
// cout << h_push[1];
for (j = 0; j < v; j++) {
if (h_push[j] == 1) {
//cout << j;
q.push(j);
}
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, st1, stop);
cout << endl << milliseconds << " ms" <<endl;
hipFree(dmat);
hipFree(d_visited);
hipFree(d_push);
//
//cuda_bfs << <1, v >> >(v,i, dmat,d_visited, d_push);
return 0;
}
|
c0580ac6cc266409fc1d58d10ecc67a63b00ed00.cu
|
/* created by Suva Shahria*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <iostream>
#include <string>
#include <fstream>
#include <queue>
#include <time.h>
#include <cstring>
#include <cstdlib>
#include <iostream>
#include <time.h>
using namespace std;
/*
Given a vertex, a different thread checks each other vertex to see whether there is an edge connecting them.
*/
__global__
void cuda_bfs(int v, int idx, int * dmat, bool * d_visited, int * d_push) {
int index = idx * v;
if (threadIdx.x < v) {
if (dmat[index + threadIdx.x] && !d_visited[threadIdx.x]) {
//printf("%d-----%d\n", index + threadIdx.x, threadIdx.x);
// printf("%d\n", threadIdx.x);
d_visited[threadIdx.x] = true;
d_push[threadIdx.x] = 1;
}
}
/*if (threadIdx.x < v) {
if (visited[threadIdx.x] == true) {
printf("%d\n", threadIdx.x);
}
}
/*if (threadIdx.x < v*v) {
if (dmat[threadIdx.x] == 1) {
printf("%d\n", threadIdx.x);
}
}
*/
}
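/* Editor's illustrative sketch (not part of the original program): a CPU reference BFS
   over the same row-major adjacency matrix, handy for checking the visit order printed
   by the GPU frontier loop in main(). The function name is an assumption and nothing
   calls it. */
static void cpu_bfs_reference(const int* mat, int v, int start) {
  bool* visited = new bool[v]();  // value-initialized to false
  queue<int> q;
  visited[start] = true;
  q.push(start);
  while (!q.empty()) {
    int u = q.front(); q.pop();
    cout << u << " ";
    for (int w = 0; w < v; ++w) {
      // mat[u*v + w] == 1 means there is an edge from u to w
      if (mat[u*v + w] && !visited[w]) {
        visited[w] = true;
        q.push(w);
      }
    }
  }
  cout << endl;
  delete[] visited;
}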
int main(int arg, char** argv) {
int* mat;
int i;
int v;
int single; // fgetc() returns int; EOF must be compared as an int, not a char
if (arg != 3)
{
printf("usage: ./out size, starting_index example: ./t 5 2 \n");
return -1;
}
v = atoi(argv[1]);
int start = atoi(argv[2]);
if (start >= v || start<0) {
printf("start index is out of bounds \n");
return -1;
}
FILE *pToFile = fopen("graph.txt", "r");
i = 0;
//
mat = (int*)malloc(v *v * sizeof(int));
//read the adjacency matrix from graph.txt
while ((single = fgetc(pToFile)) != EOF) {
if (single != '\n') {
if (single == '1') {
// cout << i << endl;
mat[i] = 1;
//cout << mat[i] << endl;
}
else {
mat[i] = 0;
}
i++;
}
}
fclose(pToFile);
int * dmat;
// create device objects
cudaMalloc((void**)&dmat, sizeof(int) * v*v);
cudaMemcpy((void*)dmat, (void*)mat, sizeof(int)*v*v, cudaMemcpyHostToDevice);
bool* visited = (bool*)malloc(v * sizeof(bool)); //visited
bool* d_visited;
for (int i = 0; i < v; i++) {
visited[i] = false;
}
visited[start] = true;
cudaMalloc((void**)&d_visited, sizeof(bool) * v);
cudaMemcpy((void*)d_visited, (void*)visited, sizeof(bool)*v, cudaMemcpyHostToDevice);
queue<int> q; //queue
q.push(start);
int* h_push = (int*)malloc(v * sizeof(int)); //h push
int* d_push;
for (i = 0; i < v; i++) {
h_push[i] = 0;
}
cudaMalloc((void**)&d_push, sizeof(int) * v);
cudaMemcpy((void*)d_push, (void*)h_push, sizeof(int)*v, cudaMemcpyHostToDevice);
cudaEvent_t st1, stop;
cudaEventCreate(&st1);
cudaEventCreate(&stop);
float milliseconds = 0;
int j = 0;
cudaEventRecord(st1);
// once vertex is found to be a neighbor adds it to end
while (!q.empty()) {
for (i = 0; i < v; i++) {
h_push[i] = 0;
}
cudaMemcpy((void*)d_push, (void*)h_push, sizeof(int)*v, cudaMemcpyHostToDevice);
// cudaMemcpy((void*)d_visited, (void*)visited, sizeof(bool)*v, cudaMemcpyHostToDevice);
i = q.front();
cout << q.front() << " ";
q.pop();
cuda_bfs << <1, v >> >(v, i, dmat, d_visited, d_push);
cudaMemcpy((void*)h_push, (void*)d_push, sizeof(int) * v, cudaMemcpyDeviceToHost);
// cudaMemcpy((void*)d_visited, (void*)visited, sizeof(bool) * v, cudaMemcpyDeviceToHost);
// cout << h_push[1];
for (j = 0; j < v; j++) {
if (h_push[j] == 1) {
//cout << j;
q.push(j);
}
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, st1, stop);
cout << endl << milliseconds << " ms" <<endl;
cudaFree(dmat);
cudaFree(d_visited);
cudaFree(d_push);
//
//cuda_bfs << <1, v >> >(v,i, dmat,d_visited, d_push);
return 0;
}
|
48e46bb5167faa82a55cfa1bc078bfa08640fdaa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#include "core/providers/cuda/cu_inc/common.cuh"
#include "layer_norm_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
template <typename U, bool simplified>
__device__ void cuWelfordOnlineSum(
const U curr,
U& mu,
U& sigma2,
U& count) {
count = count + U(1);
U delta = curr - mu;
U lmean = mu + delta / count;
mu = lmean;
if (simplified) {
sigma2 = sigma2 + curr * curr;
} else {
U delta2 = curr - lmean;
sigma2 = sigma2 + delta * delta2;
}
}
template <typename U, bool simplified>
__device__ void cuChanOnlineSum(
const U muB,
const U sigma2B,
const U countB,
U& mu,
U& sigma2,
U& count) {
U delta = muB - mu;
U nA = count;
U nB = countB;
count = count + countB;
U nX = count;
if (nX > U(0)) {
nA = nA / nX;
nB = nB / nX;
mu = nA * mu + nB * muB;
if (simplified) {
sigma2 = sigma2 + sigma2B;
} else {
sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX;
}
} else {
mu = U(0);
sigma2 = U(0);
}
}
template <typename T, typename U, bool simplified>
__device__ void cuWelfordMuSigma2(
const T* __restrict__ vals,
const int n1,
const int n2,
const int i1,
U& mu,
U& sigma2,
U* buf) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
U count = U(0);
mu = U(0);
sigma2 = U(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const T* lvals = vals + i1 * n2;
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
U curr = static_cast<U>(lvals[l + k]);
cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
U curr = static_cast<U>(lvals[l]);
cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
}
// intra-warp reductions
#pragma unroll
for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
U muB = WARP_SHFL_DOWN(mu, stride);
U countB = WARP_SHFL_DOWN(count, stride);
U sigma2B = WARP_SHFL_DOWN(sigma2, stride);
cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
U* ubuf = (U*)buf;
U* ibuf = (U*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
U muB = ubuf[2 * threadIdx.y];
U sigma2B = ubuf[2 * threadIdx.y + 1];
U countB = ibuf[threadIdx.y];
cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
__syncthreads();
}
// threadIdx.x == 0 && threadIdx.y == 0 is the only thread that has the correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / U(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2 / U(n2), 0);
}
}
}
template <bool simplified>
__device__ void cuWelfordMuSigma2(
const half* __restrict__ vals,
const int n1,
const int n2,
const int i1,
float& mu,
float& sigma2,
float* buf) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
float count = 0.0f;
mu = float(0);
sigma2 = float(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const half* lvals = vals + i1 * n2;
int l = 8 * thrx;
if ((((size_t)lvals) & 3) != 0) {
// 16 bit alignment
// first thread consumes first point
if (thrx == 0) {
float curr = static_cast<float>(lvals[0]);
cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
}
++l;
}
// at this point, lvals[l] are 32 bit aligned for all threads.
for (; l + 7 < n2; l += 8 * numx) {
for (int k = 0; k < 8; k += 2) {
float2 curr = __half22float2(*((__half2*)(lvals + l + k)));
cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.x), mu, sigma2, count);
cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.y), mu, sigma2, count);
}
}
for (; l < n2; ++l) {
float curr = static_cast<float>(lvals[l]);
cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
}
// intra-warp reductions
#pragma unroll
for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
float muB = WARP_SHFL_DOWN(mu, stride);
float countB = WARP_SHFL_DOWN(count, stride);
float sigma2B = WARP_SHFL_DOWN(sigma2, stride);
cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
float* ubuf = (float*)buf;
float* ibuf = (float*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
float muB = ubuf[2 * threadIdx.y];
float sigma2B = ubuf[2 * threadIdx.y + 1];
float countB = ibuf[threadIdx.y];
cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
__syncthreads();
}
// threadIdx.x == 0 && threadIdx.y == 0 is the only thread that has the correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / float(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2 / float(n2), 0);
}
}
}
template <typename U>
__device__ U rsqrt(U v) {
return U(1) / sqrt(v);
}
template <>
__device__ float rsqrt(float v) {
return rsqrtf(v);
}
template <>
__device__ double rsqrt(double v) {
return rsqrt(v);
}
namespace {
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
// template <typename T>
// struct SharedMemory
// {
// // Ensure that we won't compile any un-specialized types
// __device__ T *getPointer()
// {
// extern __device__ void error(void);
// error();
// return NULL;
// }
// };
// https://github.com/NVIDIA/apex/issues/246
template <typename T>
struct SharedMemory;
template <>
struct SharedMemory<float> {
__device__ float* getPointer() {
extern __shared__ float s_float[];
return s_float;
}
};
template <>
struct SharedMemory<double> {
__device__ double* getPointer() {
extern __shared__ double s_double[];
return s_double;
}
};
} // namespace
template <typename T, typename U, bool simplified>
__global__ void cuApplyLayerNorm(
T* __restrict__ output_vals,
U* __restrict__ mean,
U* __restrict__ inv_std_dev,
const T* __restrict__ vals,
const int n1,
const int n2,
const U epsilon,
const T* __restrict__ gamma,
const T* __restrict__ beta) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensors are contiguous
//
for (int i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) {
SharedMemory<U> shared;
U* buf = shared.getPointer();
U mu, sigma2;
cuWelfordMuSigma2<T, U, simplified>(vals, n1, n2, i1, mu, sigma2, buf);
const T* lvals = vals + i1 * n2;
T* ovals = output_vals + i1 * n2;
U c_inv_std_dev = rsqrt(sigma2 + epsilon);
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
for (int i = thrx; i < n2; i += numx) {
U curr = static_cast<U>(lvals[i]);
T gamma_i = (gamma != NULL) ? gamma[i]: (T)1;
T beta_i = (beta != NULL) ? beta[i] : (T) 0;
if (simplified) {
ovals[i] = gamma_i * static_cast<T>(c_inv_std_dev * curr);
} else {
ovals[i] = gamma_i * static_cast<T>(c_inv_std_dev * (curr - mu)) + beta_i;
}
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (mean != nullptr) mean[i1] = mu;
if (inv_std_dev != nullptr) inv_std_dev[i1] = c_inv_std_dev;
}
}
}
template <typename T, typename U, bool simplified>
void HostApplyLayerNorm(
const hipDeviceProp_t& prop,
hipStream_t stream,
T* output,
U* mean,
U* inv_std_dev,
const T* input,
int n1,
int n2,
double epsilon,
const T* gamma,
const T* beta) {
const int maxGridY = prop.maxGridSize[1];
const int warp_size = prop.warpSize;
ORT_ENFORCE(warp_size == GPU_WARP_SIZE);
const dim3 threads(warp_size, 4, 1);
const dim3 blocks(1, std::min<unsigned int>(n1, maxGridY), 1);
int nshared =
threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0;
hipLaunchKernelGGL(( cuApplyLayerNorm<T, U, simplified>), dim3(blocks), dim3(threads), nshared, stream,
output,
mean,
inv_std_dev,
input,
n1, n2,
U(epsilon),
gamma, beta);
}
#define LAYERNORM_LINEAR_IMPL(T, U, simplified) \
template void HostApplyLayerNorm<T, U, simplified>(const hipDeviceProp_t& prop, hipStream_t stream, T* output, U* mean, U* inv_std_dev, const T* input, int n1, int n2, \
double epsilon, const T* gamma, const T* beta);
LAYERNORM_LINEAR_IMPL(float, float, true)
LAYERNORM_LINEAR_IMPL(half, float, true)
LAYERNORM_LINEAR_IMPL(double, double, true)
LAYERNORM_LINEAR_IMPL(float, float, false)
LAYERNORM_LINEAR_IMPL(half, float, false)
LAYERNORM_LINEAR_IMPL(double, double, false)
//LAYERNORM_LINEAR_IMPL(half, half)
#if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, true)
LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, false)
#endif
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
48e46bb5167faa82a55cfa1bc078bfa08640fdaa.cu
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#include "core/providers/cuda/cu_inc/common.cuh"
#include "layer_norm_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
template <typename U, bool simplified>
__device__ void cuWelfordOnlineSum(
const U curr,
U& mu,
U& sigma2,
U& count) {
count = count + U(1);
U delta = curr - mu;
U lmean = mu + delta / count;
mu = lmean;
if (simplified) {
sigma2 = sigma2 + curr * curr;
} else {
U delta2 = curr - lmean;
sigma2 = sigma2 + delta * delta2;
}
}
template <typename U, bool simplified>
__device__ void cuChanOnlineSum(
const U muB,
const U sigma2B,
const U countB,
U& mu,
U& sigma2,
U& count) {
U delta = muB - mu;
U nA = count;
U nB = countB;
count = count + countB;
U nX = count;
if (nX > U(0)) {
nA = nA / nX;
nB = nB / nX;
mu = nA * mu + nB * muB;
if (simplified) {
sigma2 = sigma2 + sigma2B;
} else {
sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX;
}
} else {
mu = U(0);
sigma2 = U(0);
}
}
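/* Editor's illustrative sketch (not part of the original ONNX Runtime source): the two
   device helpers above are Welford's online update and Chan et al.'s pairwise merge,
   mean = (nA*muA + nB*muB)/nX and M2 = M2A + M2B + delta^2 * nA*nB/nX, where sigma2
   holds the unscaled M2 until cuWelfordMuSigma2 divides it by n2 at the end. Below is
   a plain host restatement for a float array; all names are assumptions and nothing
   calls this function. */
static void WelfordChanReference(const float* x, int n, float& mean, float& variance) {
  if (n <= 0) { mean = 0.f; variance = 0.f; return; }
  float mu[2] = {0.f, 0.f}, m2[2] = {0.f, 0.f}, cnt[2] = {0.f, 0.f};
  const int half = n / 2;
  for (int part = 0; part < 2; ++part) {
    const int begin = (part == 0) ? 0 : half;
    const int end = (part == 0) ? half : n;
    for (int i = begin; i < end; ++i) {  // Welford online update
      cnt[part] += 1.f;
      const float delta = x[i] - mu[part];
      mu[part] += delta / cnt[part];
      m2[part] += delta * (x[i] - mu[part]);
    }
  }
  // Chan merge of the two partial (mu, M2, count) triples
  const float delta = mu[1] - mu[0];
  const float count = cnt[0] + cnt[1];
  mean = (cnt[0] / count) * mu[0] + (cnt[1] / count) * mu[1];
  const float m2_total = m2[0] + m2[1] + delta * delta * (cnt[0] * cnt[1] / count);
  variance = m2_total / count;  // biased (population) variance, matching M2 / n2 above
}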
template <typename T, typename U, bool simplified>
__device__ void cuWelfordMuSigma2(
const T* __restrict__ vals,
const int n1,
const int n2,
const int i1,
U& mu,
U& sigma2,
U* buf) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
U count = U(0);
mu = U(0);
sigma2 = U(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const T* lvals = vals + i1 * n2;
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
U curr = static_cast<U>(lvals[l + k]);
cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
U curr = static_cast<U>(lvals[l]);
cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
}
// intra-warp reductions
#pragma unroll
for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
U muB = WARP_SHFL_DOWN(mu, stride);
U countB = WARP_SHFL_DOWN(count, stride);
U sigma2B = WARP_SHFL_DOWN(sigma2, stride);
cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
U* ubuf = (U*)buf;
U* ibuf = (U*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
U muB = ubuf[2 * threadIdx.y];
U sigma2B = ubuf[2 * threadIdx.y + 1];
U countB = ibuf[threadIdx.y];
cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
__syncthreads();
}
// threadIdx.x == 0 && threadIdx.y == 0 is the only thread that has the correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / U(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2 / U(n2), 0);
}
}
}
template <bool simplified>
__device__ void cuWelfordMuSigma2(
const half* __restrict__ vals,
const int n1,
const int n2,
const int i1,
float& mu,
float& sigma2,
float* buf) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
float count = 0.0f;
mu = float(0);
sigma2 = float(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const half* lvals = vals + i1 * n2;
int l = 8 * thrx;
if ((((size_t)lvals) & 3) != 0) {
// 16 bit alignment
// first thread consumes first point
if (thrx == 0) {
float curr = static_cast<float>(lvals[0]);
cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
}
++l;
}
// at this point, lvals[l] are 32 bit aligned for all threads.
for (; l + 7 < n2; l += 8 * numx) {
for (int k = 0; k < 8; k += 2) {
float2 curr = __half22float2(*((__half2*)(lvals + l + k)));
cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.x), mu, sigma2, count);
cuWelfordOnlineSum<float, simplified>(static_cast<float>(curr.y), mu, sigma2, count);
}
}
for (; l < n2; ++l) {
float curr = static_cast<float>(lvals[l]);
cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
}
// intra-warp reductions
#pragma unroll
for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
float muB = WARP_SHFL_DOWN(mu, stride);
float countB = WARP_SHFL_DOWN(count, stride);
float sigma2B = WARP_SHFL_DOWN(sigma2, stride);
cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
float* ubuf = (float*)buf;
float* ibuf = (float*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
float muB = ubuf[2 * threadIdx.y];
float sigma2B = ubuf[2 * threadIdx.y + 1];
float countB = ibuf[threadIdx.y];
cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
__syncthreads();
}
    // threadIdx.x == 0 && threadIdx.y == 0 is the only thread that has the correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / float(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2 / float(n2), 0);
}
}
}
template <typename U>
__device__ U rsqrt(U v) {
return U(1) / sqrt(v);
}
template <>
__device__ float rsqrt(float v) {
return rsqrtf(v);
}
template <>
__device__ double rsqrt(double v) {
return rsqrt(v);
}
namespace {
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
// template <typename T>
// struct SharedMemory
// {
// // Ensure that we won't compile any un-specialized types
// __device__ T *getPointer()
// {
// extern __device__ void error(void);
// error();
// return NULL;
// }
// };
// https://github.com/NVIDIA/apex/issues/246
template <typename T>
struct SharedMemory;
template <>
struct SharedMemory<float> {
__device__ float* getPointer() {
extern __shared__ float s_float[];
return s_float;
}
};
template <>
struct SharedMemory<double> {
__device__ double* getPointer() {
extern __shared__ double s_double[];
return s_double;
}
};
} // namespace
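// The specializations above sidestep nvcc's restriction on redeclaring an
// `extern __shared__` array with different element types from templated code (see the
// apex issue linked above). Below is a minimal sketch of how SharedMemory<U>::getPointer()
// pairs with a dynamic shared-memory launch; the kernel, its name, and the launch line in
// the trailing comment are illustrative assumptions, not part of the operator code.
template <typename U>
__global__ void cuSharedMemorySketchKernel(U* out, int n) {
  SharedMemory<U> shared;                   // hypothetical example kernel
  U* buf = shared.getPointer();             // points at the dynamically sized buffer
  const int i = threadIdx.x;
  if (i < n) buf[i] = static_cast<U>(i);    // stage values in shared memory
  __syncthreads();
  if (i < n) out[i] = buf[n - 1 - i];       // read back a value another thread wrote
}
// e.g. cuSharedMemorySketchKernel<float><<<1, n, n * sizeof(float), stream>>>(out, n);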
template <typename T, typename U, bool simplified>
__global__ void cuApplyLayerNorm(
T* __restrict__ output_vals,
U* __restrict__ mean,
U* __restrict__ inv_std_dev,
const T* __restrict__ vals,
const int n1,
const int n2,
const U epsilon,
const T* __restrict__ gamma,
const T* __restrict__ beta) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensors are contiguous
//
for (int i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) {
SharedMemory<U> shared;
U* buf = shared.getPointer();
U mu, sigma2;
cuWelfordMuSigma2<T, U, simplified>(vals, n1, n2, i1, mu, sigma2, buf);
const T* lvals = vals + i1 * n2;
T* ovals = output_vals + i1 * n2;
U c_inv_std_dev = rsqrt(sigma2 + epsilon);
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
for (int i = thrx; i < n2; i += numx) {
U curr = static_cast<U>(lvals[i]);
      T gamma_i = (gamma != NULL) ? gamma[i] : (T)1;
      T beta_i = (beta != NULL) ? beta[i] : (T)0;
if (simplified) {
ovals[i] = gamma_i * static_cast<T>(c_inv_std_dev * curr);
} else {
ovals[i] = gamma_i * static_cast<T>(c_inv_std_dev * (curr - mu)) + beta_i;
}
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (mean != nullptr) mean[i1] = mu;
if (inv_std_dev != nullptr) inv_std_dev[i1] = c_inv_std_dev;
}
}
}
template <typename T, typename U, bool simplified>
void HostApplyLayerNorm(
const cudaDeviceProp& prop,
cudaStream_t stream,
T* output,
U* mean,
U* inv_std_dev,
const T* input,
int n1,
int n2,
double epsilon,
const T* gamma,
const T* beta) {
const int maxGridY = prop.maxGridSize[1];
const int warp_size = prop.warpSize;
ORT_ENFORCE(warp_size == GPU_WARP_SIZE);
const dim3 threads(warp_size, 4, 1);
const dim3 blocks(1, std::min<unsigned int>(n1, maxGridY), 1);
int nshared =
threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0;
cuApplyLayerNorm<T, U, simplified><<<blocks, threads, nshared, stream>>>(
output,
mean,
inv_std_dev,
input,
n1, n2,
U(epsilon),
gamma, beta);
}
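// A minimal host-side usage sketch for HostApplyLayerNorm: it normalizes n1 rows of
// length n2 with the float instantiation below. The helper name, device-0 property
// query, default stream, epsilon value and cudaMalloc-ed mean / inv_std_dev buffers
// are illustrative assumptions rather than the operator's real call path.
static void HostApplyLayerNormSketch(const float* d_input, float* d_output,
                                     const float* d_gamma, const float* d_beta,
                                     int n1, int n2) {
  // hypothetical helper, not referenced elsewhere in this file
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  float* d_mean = nullptr;
  float* d_inv_std_dev = nullptr;
  cudaMalloc(&d_mean, n1 * sizeof(float));         // one mean per row
  cudaMalloc(&d_inv_std_dev, n1 * sizeof(float));  // one inverse std-dev per row
  HostApplyLayerNorm<float, float, false>(prop, 0, d_output, d_mean, d_inv_std_dev,
                                          d_input, n1, n2, 1e-5, d_gamma, d_beta);
  cudaFree(d_mean);
  cudaFree(d_inv_std_dev);
}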
#define LAYERNORM_LINEAR_IMPL(T, U, simplified) \
template void HostApplyLayerNorm<T, U, simplified>(const cudaDeviceProp& prop, cudaStream_t stream, T* output, U* mean, U* inv_std_dev, const T* input, int n1, int n2, \
double epsilon, const T* gamma, const T* beta);
LAYERNORM_LINEAR_IMPL(float, float, true)
LAYERNORM_LINEAR_IMPL(half, float, true)
LAYERNORM_LINEAR_IMPL(double, double, true)
LAYERNORM_LINEAR_IMPL(float, float, false)
LAYERNORM_LINEAR_IMPL(half, float, false)
LAYERNORM_LINEAR_IMPL(double, double, false)
//LAYERNORM_LINEAR_IMPL(half, half)
#if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, true)
LAYERNORM_LINEAR_IMPL(nv_bfloat16, float, false)
#endif
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
741f9aabd6bb754b7d040b8ca8e233ef26d9592a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
typedef THCDeviceTensor<float, 4> DeviceTensor4;
typedef THCDeviceTensor<float, 1> DeviceTensor1;
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
return 31 - __clz(val);
}
struct Float2 {
float v1, v2;
__device__ Float2() {}
__device__ Float2(float v1, float v2) : v1(v1), v2(v2) {}
__device__ Float2(float v) : v1(v), v2(v) {}
__device__ Float2& operator+=(const Float2& a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
struct SumOp {
__device__ SumOp(const DeviceTensor4 t) : tensor(t) {}
__device__ __forceinline__ float operator()(int batch, int plane, int y, int x) {
return tensor[batch][plane][y][x];
}
const DeviceTensor4 tensor;
};
struct VarOp {
__device__ VarOp(float m, const DeviceTensor4 t) : mean(m), tensor(t) {}
__device__ __forceinline__ float operator()(int batch, int plane, int y, int x) {
float val = tensor[batch][plane][y][x];
return (val - mean) * (val - mean);
}
const float mean;
const DeviceTensor4 tensor;
};
struct GradOp {
__device__ GradOp(float m, const DeviceTensor4 i, const DeviceTensor4 g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2 operator()(int batch, int plane, int y, int x) {
float g = gradOutput[batch][plane][y][x];
float c = input[batch][plane][y][x] - mean;
return Float2(g, g * c);
}
const float mean;
const DeviceTensor4 input;
const DeviceTensor4 gradOutput;
};
// Sum across NumThreads threads within a warp
template<int NumThreads>
static __device__ __forceinline__ float warpSum(float val) {
#if __CUDA_ARCH__ >= 300
for (int i = 0; i < getMSB(NumThreads); ++i) {
val += __shfl_xor(val, 1 << i, NumThreads);
}
#else
__shared__ float values[NumThreads][NumThreads];
__syncthreads();
values[threadIdx.y][threadIdx.x] = val;
__syncthreads();
for (int i = 1; i < NumThreads; i++) {
val += values[threadIdx.y][(i + threadIdx.x) % NumThreads];
}
__syncthreads();
#endif
return val;
}
template<int NumThreads>
static __device__ __forceinline__ Float2 warpSum(Float2 value) {
value.v1 = warpSum<NumThreads>(value.v1);
value.v2 = warpSum<NumThreads>(value.v2);
return value;
}
// Sum across (batch, y, x) applying Op() pointwise
template<typename T, int NumThreads, typename Op>
__device__ T reduce(Op op, DeviceTensor4 tensor, int plane) {
T sum = (T)0;
for (int y = threadIdx.y; y < tensor.getSize(2); y += NumThreads) {
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(3); x += NumThreads) {
sum += op(batch, plane, y, x);
}
}
}
// sum over NumThreads within a warp
sum = warpSum<NumThreads>(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[NumThreads];
if (threadIdx.x == 0) {
shared[threadIdx.y] = sum;
}
__syncthreads();
sum = warpSum<NumThreads>(shared[threadIdx.x]);
if (threadIdx.y == 0) {
shared[threadIdx.x] = sum;
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
template <int Dim>
static THCDeviceTensor<float, Dim> checktensor(lua_State* L, int index) {
THCudaTensor *t = (THCudaTensor*)luaT_toudata(L, index, "torch.CudaTensor");
if (!t) {
return THCDeviceTensor<float, Dim>();
}
return toDeviceTensor<float, Dim>(getCutorchState(L), t);
}
__global__ void SpatialBatchNormalizationUpdateOutputInference_kernel(
const DeviceTensor4 input,
DeviceTensor4 output,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
float epsilon) {
int x = threadIdx.x;
int plane = blockIdx.x;
int batch = blockIdx.y;
float invstd = 1.0f / sqrt(runningVar[plane].ldg() + epsilon);
float mean = runningMean[plane].ldg();
float gamma = weight.numElements() > 0 ? weight[plane].ldg() : 1.0f;
float beta = bias.numElements() > 0 ? bias[plane].ldg() : 0.0f;
for (int y = threadIdx.y; y < output.getSize(2); y += blockDim.y) {
float inp = input[batch][plane][y][x].ldg();
// TODO: everyone pulling this, optimize by reusing better
output[batch][plane][y][x] = gamma * (inp - mean) * invstd + beta;
}
}
template<int NumThreads>
__global__ void SpatialBatchNormalizationUpdateOutput_kernel(
const DeviceTensor4 input,
DeviceTensor4 output,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
const float epsilon,
const float momentum,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveStd) {
assert(blockDim.x == NumThreads);
assert(blockDim.y == NumThreads);
int plane = blockIdx.x;
int N = input.getSize(0) * input.getSize(2) * input.getSize(3);
float norm = 1.0f / N;
// Compute the mean and variance across (batch, y, x)
float mean = reduce<float, NumThreads>(SumOp(input), input, plane) * norm;
__syncthreads();
float varN = reduce<float, NumThreads>(VarOp(mean, input), input, plane);
float invStd = 0.0f;
if (varN != 0.0f || epsilon != 0.0f) {
invStd = 1 / sqrt(varN * norm + epsilon);
}
// Save the mean, variance, and moving averages
if (threadIdx.y == 0 && threadIdx.x == 0) {
// Momentum based writeback
float unbiasedVar = varN / (N - 1);
saveMean[plane] = mean;
saveStd[plane] = invStd;
runningMean[plane] = (1 - momentum) * runningMean[plane] + momentum * mean;
runningVar[plane] = (1 - momentum) * runningVar[plane] + momentum * unbiasedVar;
}
// Write normalized and update the output
float gamma = weight.numElements() > 0 ? weight[plane] : 1.0f;
float beta = bias.numElements() > 0 ? bias[plane] : 0.0f;
for (int y = threadIdx.y; y < input.getSize(2); y += NumThreads) {
for (int batch = 0; batch < input.getSize(0); ++batch) {
for (int x = threadIdx.x; x < input.getSize(3); x += NumThreads) {
float inp = input[batch][plane][y][x].ldg();
output[batch][plane][y][x] = gamma * (inp - mean) * invStd + beta;
}
}
}
}
static int cunn_SpatialBatchNormalization_updateOutput(lua_State *L) {
THCState *state = getCutorchState(L);
DeviceTensor4 input = checktensor<4>(L, 1);
DeviceTensor4 output = checktensor<4>(L, 2);
DeviceTensor1 weight = checktensor<1>(L, 3);
DeviceTensor1 bias = checktensor<1>(L, 4);
int train = lua_toboolean(L, 5);
double eps = lua_tonumber(L, 6);
double momentum = lua_tonumber(L, 7);
DeviceTensor1 runningMean = checktensor<1>(L, 8);
DeviceTensor1 runningVar = checktensor<1>(L, 9);
DeviceTensor1 saveMean = checktensor<1>(L, 10);
DeviceTensor1 saveStd = checktensor<1>(L, 11);
hipStream_t s = THCState_getCurrentStream(state);
hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state);
int maxThreadsPerBlock = prop->maxThreadsPerBlock;
if (!train) {
dim3 blocks(input.getSize(1), input.getSize(0));
dim3 threads(input.getSize(3),
min(input.getSize(2), maxThreadsPerBlock / input.getSize(3)));
hipLaunchKernelGGL(( SpatialBatchNormalizationUpdateOutputInference_kernel)
, dim3(blocks), dim3(threads), 0, s,
input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
if (input.getSize(3) >= 12 && input.getSize(2) >= 12) {
dim3 threads(16, 16);
hipLaunchKernelGGL(( SpatialBatchNormalizationUpdateOutput_kernel<16>)
, dim3(blocks), dim3(threads), 0, s,
input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveStd);
} else {
dim3 threads(8, 8);
hipLaunchKernelGGL(( SpatialBatchNormalizationUpdateOutput_kernel<8>)
, dim3(blocks), dim3(threads), 0, s,
input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveStd);
}
}
return 0;
}
template<int NumThreads>
__global__ void SpatialBatchNormalizationBackward_kernel(
const DeviceTensor4 input,
const DeviceTensor4 gradOutput,
DeviceTensor4 gradInput,
DeviceTensor1 gradWeight,
DeviceTensor1 gradBias,
const DeviceTensor1 weight,
const DeviceTensor1 saveMean,
const DeviceTensor1 saveStd,
float scale) {
assert(blockDim.x == NumThreads);
assert(blockDim.y == NumThreads);
int plane = blockIdx.x;
int N = gradOutput.getSize(0) * gradOutput.getSize(2) * gradOutput.getSize(3);
float mean = saveMean[plane];
float stdVal = saveStd[plane];
float weightVal = weight.numElements() > 0 ? weight[plane] : 1.0f;
float norm = 1.0f / N;
// Compute two values across (batch, y, x) in one pass:
// 1. Sum(gradOutput)
// 2. DotProduct(gradOutput - mean, input)
Float2 res = reduce<Float2, NumThreads>(GradOp(mean, input, gradOutput), gradOutput, plane);
float gradOutputSum = res.v1;
float dotP = res.v2;
float gradMean = gradOutputSum * norm;
float projScale = dotP * norm * stdVal * stdVal;
float gradScale = stdVal * weightVal;
if (gradInput.numElements() > 0) {
for (int y = threadIdx.y; y < gradOutput.getSize(2); y += NumThreads) {
for (int batch = 0; batch < gradOutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradOutput.getSize(3); x += NumThreads) {
float gradOut = gradOutput[batch][plane][y][x];
float inp = input[batch][plane][y][x];
float proj = (inp - mean) * projScale;
gradInput[batch][plane][y][x] = (gradOut - proj - gradMean) * gradScale;
}
}
}
}
if (gradWeight.numElements() > 0) {
if (threadIdx.x == 0 && threadIdx.y == 0) {
gradWeight[plane] += scale * dotP * stdVal;
}
}
if (gradBias.numElements() > 0) {
if (threadIdx.x == 0 && threadIdx.y == 0) {
gradBias[plane] += scale * gradOutputSum;
}
}
}
static int cunn_SpatialBatchNormalization_backward(lua_State *L) {
THCState *state = getCutorchState(L);
DeviceTensor4 input = checktensor<4>(L, 1);
DeviceTensor4 gradOutput = checktensor<4>(L, 2);
DeviceTensor4 gradInput = checktensor<4>(L, 3);
DeviceTensor1 gradWeight = checktensor<1>(L, 4);
DeviceTensor1 gradBias = checktensor<1>(L, 5);
DeviceTensor1 weight = checktensor<1>(L, 6);
DeviceTensor1 saveMean = checktensor<1>(L, 7);
DeviceTensor1 saveStd = checktensor<1>(L, 8);
float scale = (float) lua_tonumber(L, 9);
hipStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
if (gradOutput.getSize(3) >= 12 && gradOutput.getSize(2) >= 12) {
dim3 threads(16, 16);
hipLaunchKernelGGL(( SpatialBatchNormalizationBackward_kernel<16>)
, dim3(blocks), dim3(threads), 0, s,
input, gradOutput, gradInput, gradWeight, gradBias, weight,
saveMean, saveStd, scale);
} else {
dim3 threads(8, 8);
hipLaunchKernelGGL(( SpatialBatchNormalizationBackward_kernel<8>)
, dim3(blocks), dim3(threads), 0, s,
input, gradOutput, gradInput, gradWeight, gradBias, weight,
saveMean, saveStd, scale);
}
return 0;
}
static const struct luaL_Reg cunn_SpatialBatchNormalization__ [] = {
{"SpatialBatchNormalization_updateOutput", cunn_SpatialBatchNormalization_updateOutput},
{"SpatialBatchNormalization_backward", cunn_SpatialBatchNormalization_backward},
{NULL, NULL}
};
void cunn_SpatialBatchNormalization_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SpatialBatchNormalization__, "nn");
lua_pop(L,1);
}
|
741f9aabd6bb754b7d040b8ca8e233ef26d9592a.cu
|
#include "utils.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
typedef THCDeviceTensor<float, 4> DeviceTensor4;
typedef THCDeviceTensor<float, 1> DeviceTensor1;
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
return 31 - __clz(val);
}
struct Float2 {
float v1, v2;
__device__ Float2() {}
__device__ Float2(float v1, float v2) : v1(v1), v2(v2) {}
__device__ Float2(float v) : v1(v), v2(v) {}
__device__ Float2& operator+=(const Float2& a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
struct SumOp {
__device__ SumOp(const DeviceTensor4 t) : tensor(t) {}
__device__ __forceinline__ float operator()(int batch, int plane, int y, int x) {
return tensor[batch][plane][y][x];
}
const DeviceTensor4 tensor;
};
struct VarOp {
__device__ VarOp(float m, const DeviceTensor4 t) : mean(m), tensor(t) {}
__device__ __forceinline__ float operator()(int batch, int plane, int y, int x) {
float val = tensor[batch][plane][y][x];
return (val - mean) * (val - mean);
}
const float mean;
const DeviceTensor4 tensor;
};
struct GradOp {
__device__ GradOp(float m, const DeviceTensor4 i, const DeviceTensor4 g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2 operator()(int batch, int plane, int y, int x) {
float g = gradOutput[batch][plane][y][x];
float c = input[batch][plane][y][x] - mean;
return Float2(g, g * c);
}
const float mean;
const DeviceTensor4 input;
const DeviceTensor4 gradOutput;
};
// Sum across NumThreads threads within a warp
template<int NumThreads>
static __device__ __forceinline__ float warpSum(float val) {
#if __CUDA_ARCH__ >= 300
for (int i = 0; i < getMSB(NumThreads); ++i) {
val += __shfl_xor(val, 1 << i, NumThreads);
}
#else
__shared__ float values[NumThreads][NumThreads];
__syncthreads();
values[threadIdx.y][threadIdx.x] = val;
__syncthreads();
for (int i = 1; i < NumThreads; i++) {
val += values[threadIdx.y][(i + threadIdx.x) % NumThreads];
}
__syncthreads();
#endif
return val;
}
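// Note: newer CUDA toolkits (9.0+) deprecate the unsynchronized __shfl_xor used above in
// favour of the *_sync variants. A sketch of the same butterfly reduction written with
// __shfl_xor_sync is kept commented out, since this file targets the older toolchain; the
// full-warp mask below assumes NumThreads never exceeds warpSize.
// template<int NumThreads>
// static __device__ __forceinline__ float warpSumSync(float val) {
//   for (int i = 0; i < getMSB(NumThreads); ++i) {
//     val += __shfl_xor_sync(0xffffffff, val, 1 << i, NumThreads);
//   }
//   return val;
// }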
template<int NumThreads>
static __device__ __forceinline__ Float2 warpSum(Float2 value) {
value.v1 = warpSum<NumThreads>(value.v1);
value.v2 = warpSum<NumThreads>(value.v2);
return value;
}
// Sum across (batch, y, x) applying Op() pointwise
template<typename T, int NumThreads, typename Op>
__device__ T reduce(Op op, DeviceTensor4 tensor, int plane) {
T sum = (T)0;
for (int y = threadIdx.y; y < tensor.getSize(2); y += NumThreads) {
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(3); x += NumThreads) {
sum += op(batch, plane, y, x);
}
}
}
// sum over NumThreads within a warp
sum = warpSum<NumThreads>(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[NumThreads];
if (threadIdx.x == 0) {
shared[threadIdx.y] = sum;
}
__syncthreads();
sum = warpSum<NumThreads>(shared[threadIdx.x]);
if (threadIdx.y == 0) {
shared[threadIdx.x] = sum;
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
template <int Dim>
static THCDeviceTensor<float, Dim> checktensor(lua_State* L, int index) {
THCudaTensor *t = (THCudaTensor*)luaT_toudata(L, index, "torch.CudaTensor");
if (!t) {
return THCDeviceTensor<float, Dim>();
}
return toDeviceTensor<float, Dim>(getCutorchState(L), t);
}
__global__ void SpatialBatchNormalizationUpdateOutputInference_kernel(
const DeviceTensor4 input,
DeviceTensor4 output,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
float epsilon) {
int x = threadIdx.x;
int plane = blockIdx.x;
int batch = blockIdx.y;
float invstd = 1.0f / sqrt(runningVar[plane].ldg() + epsilon);
float mean = runningMean[plane].ldg();
float gamma = weight.numElements() > 0 ? weight[plane].ldg() : 1.0f;
float beta = bias.numElements() > 0 ? bias[plane].ldg() : 0.0f;
for (int y = threadIdx.y; y < output.getSize(2); y += blockDim.y) {
float inp = input[batch][plane][y][x].ldg();
// TODO: everyone pulling this, optimize by reusing better
output[batch][plane][y][x] = gamma * (inp - mean) * invstd + beta;
}
}
template<int NumThreads>
__global__ void SpatialBatchNormalizationUpdateOutput_kernel(
const DeviceTensor4 input,
DeviceTensor4 output,
const DeviceTensor1 weight,
const DeviceTensor1 bias,
const float epsilon,
const float momentum,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveStd) {
assert(blockDim.x == NumThreads);
assert(blockDim.y == NumThreads);
int plane = blockIdx.x;
int N = input.getSize(0) * input.getSize(2) * input.getSize(3);
float norm = 1.0f / N;
// Compute the mean and variance across (batch, y, x)
float mean = reduce<float, NumThreads>(SumOp(input), input, plane) * norm;
__syncthreads();
float varN = reduce<float, NumThreads>(VarOp(mean, input), input, plane);
float invStd = 0.0f;
if (varN != 0.0f || epsilon != 0.0f) {
invStd = 1 / sqrt(varN * norm + epsilon);
}
// Save the mean, variance, and moving averages
if (threadIdx.y == 0 && threadIdx.x == 0) {
// Momentum based writeback
float unbiasedVar = varN / (N - 1);
saveMean[plane] = mean;
saveStd[plane] = invStd;
runningMean[plane] = (1 - momentum) * runningMean[plane] + momentum * mean;
runningVar[plane] = (1 - momentum) * runningVar[plane] + momentum * unbiasedVar;
}
// Write normalized and update the output
float gamma = weight.numElements() > 0 ? weight[plane] : 1.0f;
float beta = bias.numElements() > 0 ? bias[plane] : 0.0f;
for (int y = threadIdx.y; y < input.getSize(2); y += NumThreads) {
for (int batch = 0; batch < input.getSize(0); ++batch) {
for (int x = threadIdx.x; x < input.getSize(3); x += NumThreads) {
float inp = input[batch][plane][y][x].ldg();
output[batch][plane][y][x] = gamma * (inp - mean) * invStd + beta;
}
}
}
}
static int cunn_SpatialBatchNormalization_updateOutput(lua_State *L) {
THCState *state = getCutorchState(L);
DeviceTensor4 input = checktensor<4>(L, 1);
DeviceTensor4 output = checktensor<4>(L, 2);
DeviceTensor1 weight = checktensor<1>(L, 3);
DeviceTensor1 bias = checktensor<1>(L, 4);
int train = lua_toboolean(L, 5);
double eps = lua_tonumber(L, 6);
double momentum = lua_tonumber(L, 7);
DeviceTensor1 runningMean = checktensor<1>(L, 8);
DeviceTensor1 runningVar = checktensor<1>(L, 9);
DeviceTensor1 saveMean = checktensor<1>(L, 10);
DeviceTensor1 saveStd = checktensor<1>(L, 11);
cudaStream_t s = THCState_getCurrentStream(state);
cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);
int maxThreadsPerBlock = prop->maxThreadsPerBlock;
if (!train) {
dim3 blocks(input.getSize(1), input.getSize(0));
dim3 threads(input.getSize(3),
min(input.getSize(2), maxThreadsPerBlock / input.getSize(3)));
SpatialBatchNormalizationUpdateOutputInference_kernel
<<<blocks, threads, 0, s>>>
(input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
if (input.getSize(3) >= 12 && input.getSize(2) >= 12) {
dim3 threads(16, 16);
SpatialBatchNormalizationUpdateOutput_kernel<16>
<<<blocks, threads, 0, s>>>
(input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveStd);
} else {
dim3 threads(8, 8);
SpatialBatchNormalizationUpdateOutput_kernel<8>
<<<blocks, threads, 0, s>>>
(input, output, weight, bias, eps, momentum, runningMean, runningVar,
saveMean, saveStd);
}
}
return 0;
}
template<int NumThreads>
__global__ void SpatialBatchNormalizationBackward_kernel(
const DeviceTensor4 input,
const DeviceTensor4 gradOutput,
DeviceTensor4 gradInput,
DeviceTensor1 gradWeight,
DeviceTensor1 gradBias,
const DeviceTensor1 weight,
const DeviceTensor1 saveMean,
const DeviceTensor1 saveStd,
float scale) {
assert(blockDim.x == NumThreads);
assert(blockDim.y == NumThreads);
int plane = blockIdx.x;
int N = gradOutput.getSize(0) * gradOutput.getSize(2) * gradOutput.getSize(3);
float mean = saveMean[plane];
float stdVal = saveStd[plane];
float weightVal = weight.numElements() > 0 ? weight[plane] : 1.0f;
float norm = 1.0f / N;
// Compute two values across (batch, y, x) in one pass:
// 1. Sum(gradOutput)
// 2. DotProduct(gradOutput - mean, input)
Float2 res = reduce<Float2, NumThreads>(GradOp(mean, input, gradOutput), gradOutput, plane);
float gradOutputSum = res.v1;
float dotP = res.v2;
float gradMean = gradOutputSum * norm;
float projScale = dotP * norm * stdVal * stdVal;
float gradScale = stdVal * weightVal;
if (gradInput.numElements() > 0) {
for (int y = threadIdx.y; y < gradOutput.getSize(2); y += NumThreads) {
for (int batch = 0; batch < gradOutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradOutput.getSize(3); x += NumThreads) {
float gradOut = gradOutput[batch][plane][y][x];
float inp = input[batch][plane][y][x];
float proj = (inp - mean) * projScale;
gradInput[batch][plane][y][x] = (gradOut - proj - gradMean) * gradScale;
}
}
}
}
if (gradWeight.numElements() > 0) {
if (threadIdx.x == 0 && threadIdx.y == 0) {
gradWeight[plane] += scale * dotP * stdVal;
}
}
if (gradBias.numElements() > 0) {
if (threadIdx.x == 0 && threadIdx.y == 0) {
gradBias[plane] += scale * gradOutputSum;
}
}
}
static int cunn_SpatialBatchNormalization_backward(lua_State *L) {
THCState *state = getCutorchState(L);
DeviceTensor4 input = checktensor<4>(L, 1);
DeviceTensor4 gradOutput = checktensor<4>(L, 2);
DeviceTensor4 gradInput = checktensor<4>(L, 3);
DeviceTensor1 gradWeight = checktensor<1>(L, 4);
DeviceTensor1 gradBias = checktensor<1>(L, 5);
DeviceTensor1 weight = checktensor<1>(L, 6);
DeviceTensor1 saveMean = checktensor<1>(L, 7);
DeviceTensor1 saveStd = checktensor<1>(L, 8);
float scale = (float) lua_tonumber(L, 9);
cudaStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
if (gradOutput.getSize(3) >= 12 && gradOutput.getSize(2) >= 12) {
dim3 threads(16, 16);
SpatialBatchNormalizationBackward_kernel<16>
<<<blocks, threads, 0, s>>>
(input, gradOutput, gradInput, gradWeight, gradBias, weight,
saveMean, saveStd, scale);
} else {
dim3 threads(8, 8);
SpatialBatchNormalizationBackward_kernel<8>
<<<blocks, threads, 0, s>>>
(input, gradOutput, gradInput, gradWeight, gradBias, weight,
saveMean, saveStd, scale);
}
return 0;
}
static const struct luaL_Reg cunn_SpatialBatchNormalization__ [] = {
{"SpatialBatchNormalization_updateOutput", cunn_SpatialBatchNormalization_updateOutput},
{"SpatialBatchNormalization_backward", cunn_SpatialBatchNormalization_backward},
{NULL, NULL}
};
void cunn_SpatialBatchNormalization_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SpatialBatchNormalization__, "nn");
lua_pop(L,1);
}
|
c35a7a2f9cc63fc7a7b9237cabf1c62e3b5f0ab0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <algorithm>
#include "vm.hpp"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <cmath>
#define T 512 // threads per block
#define SQ(x) ((x) * (x))
const float ONE_OVER_TWO_PI = (1 / (2 * M_PI));
const float E = 9e-2;
const float SQ_E = SQ(E);
const float ONE_OVER_SQ_E = 1 / SQ_E;
const float VISCOSITY = 5e-3;
struct particle_to_float4 {
float4 operator()(particle p) {
return make_float4(p.x, p.y, p.circ, 0.0);
}
};
__device__ float eta(float sq_r) {
return ONE_OVER_TWO_PI * expf(-sq_r/SQ_E);
}
__device__ float k_factor(float sq_r) {
return (ONE_OVER_TWO_PI / sq_r) * (1 - expf(-sq_r / SQ_E));
}
__device__ float3
particle_interaction(float3 d, float4 p, float4 q) {
float2 r = make_float2(p.x - q.x, p.y - q.y);
float sq_r = SQ(r.x) + SQ(r.y);
// update velocity
float vel_kernel_factor = k_factor(sq_r);
d.x += vel_kernel_factor * -r.y;
d.y += vel_kernel_factor * r.x;
// update circulation
d.z += (q.z - p.z) * eta(sq_r);
return d;
}
__device__ float3
update_tile(float4 p, float3 derivatives) {
extern __shared__ float4 shared_particles[];
# define SHARED(i) (shared_particles[(i) + blockDim.x * threadIdx.x])
unsigned long i = 0;
unsigned int counter = 0;
while (counter < blockDim.x) {
derivatives = particle_interaction(derivatives, p, SHARED(i++));
++counter;
}
derivatives.z *= VISCOSITY * ONE_OVER_SQ_E;
return derivatives;
}
__device__ float3
eval_derivatives(float4 p, float4 *particles, float N) {
extern __shared__ float4 shared_particles[];
unsigned const t = threadIdx.x;
unsigned num_tiles = ::ceil(N / T);
float3 derivatives = make_float3(0.0f, 0.0f, 0.0f);
for (int tile = 0; tile < num_tiles; ++tile) {
shared_particles[t] = particles[tile * T + t];
__syncthreads();
derivatives = update_tile(p, derivatives);
__syncthreads();
}
return derivatives;
}
template <bool get_derivatives>
__global__ void
integrate(float dt, unsigned nr_particles, float4 *old_particles, float4 *new_particles, float4 *new_derivatives) {
unsigned pid = blockIdx.x * blockDim.x + threadIdx.x;
// fetch particle from global memory
if (pid < nr_particles) {
float4 p = old_particles[pid];
// compute velocity and derivative of circulation for particle p
float3 derivatives = eval_derivatives(p, old_particles, nr_particles);
// integrate trajectories and circulation
p.x += derivatives.x * dt;
p.y += derivatives.y * dt;
p.z += derivatives.z * dt;
// put the particle back in global memory
new_particles[pid] = p;
if (get_derivatives) {
new_derivatives[pid] = make_float4(derivatives.x, derivatives.y, derivatives.z, 0.0f);
}
}
}
void solve(std::vector<particle>& particles, std::vector<particle>& derivatives,
float dt, unsigned nr_iterations) {
unsigned N = particles.size();
thrust::host_vector<float4> ps_h(N);
thrust::transform(particles.begin(), particles.end(),
ps_h.begin(),
particle_to_float4());
thrust::device_vector<float4> ps_d[2] = { // I hate you, C++.
thrust::device_vector<float4>(N),
thrust::device_vector<float4>(N),
};
unsigned current_read = 0, current_write = 1;
thrust::copy(ps_h.begin(), ps_h.end(), ps_d[current_read].begin());
for (unsigned i = 0; i < nr_iterations; ++i) {
unsigned nr_blocks = static_cast<unsigned>(::ceil(N / T));
hipLaunchKernelGGL(( integrate<false>), dim3(nr_blocks), dim3(T), T * sizeof(float4), 0, dt, N,
(float4*) thrust::raw_pointer_cast(&ps_d[current_read]),
(float4*) thrust::raw_pointer_cast(&ps_d[current_write]),
NULL);
hipDeviceSynchronize();
std::swap(current_read, current_write);
}
}
|
c35a7a2f9cc63fc7a7b9237cabf1c62e3b5f0ab0.cu
|
#include <vector>
#include <algorithm>
#include "vm.hpp"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <cmath>
#define T 512 // threads per block
#define SQ(x) ((x) * (x))
const float ONE_OVER_TWO_PI = (1 / (2 * M_PI));
const float E = 9e-2;
const float SQ_E = SQ(E);
const float ONE_OVER_SQ_E = 1 / SQ_E;
const float VISCOSITY = 5e-3;
struct particle_to_float4 {
float4 operator()(particle p) {
return make_float4(p.x, p.y, p.circ, 0.0);
}
};
__device__ float eta(float sq_r) {
return ONE_OVER_TWO_PI * expf(-sq_r/SQ_E);
}
__device__ float k_factor(float sq_r) {
return (ONE_OVER_TWO_PI / sq_r) * (1 - expf(-sq_r / SQ_E));
}
__device__ float3
particle_interaction(float3 d, float4 p, float4 q) {
float2 r = make_float2(p.x - q.x, p.y - q.y);
float sq_r = SQ(r.x) + SQ(r.y);
// update velocity
float vel_kernel_factor = k_factor(sq_r);
d.x += vel_kernel_factor * -r.y;
d.y += vel_kernel_factor * r.x;
// update circulation
d.z += (q.z - p.z) * eta(sq_r);
return d;
}
__device__ float3
update_tile(float4 p, float3 derivatives) {
extern __shared__ float4 shared_particles[];
# define SHARED(i) (shared_particles[(i) + blockDim.x * threadIdx.x])
unsigned long i = 0;
unsigned int counter = 0;
while (counter < blockDim.x) {
derivatives = particle_interaction(derivatives, p, SHARED(i++));
++counter;
}
derivatives.z *= VISCOSITY * ONE_OVER_SQ_E;
return derivatives;
}
__device__ float3
eval_derivatives(float4 p, float4 *particles, float N) {
extern __shared__ float4 shared_particles[];
unsigned const t = threadIdx.x;
unsigned num_tiles = std::ceil(N / T);
float3 derivatives = make_float3(0.0f, 0.0f, 0.0f);
for (int tile = 0; tile < num_tiles; ++tile) {
shared_particles[t] = particles[tile * T + t];
__syncthreads();
derivatives = update_tile(p, derivatives);
__syncthreads();
}
return derivatives;
}
template <bool get_derivatives>
__global__ void
integrate(float dt, unsigned nr_particles, float4 *old_particles, float4 *new_particles, float4 *new_derivatives) {
unsigned pid = blockIdx.x * blockDim.x + threadIdx.x;
// fetch particle from global memory
if (pid < nr_particles) {
float4 p = old_particles[pid];
// compute velocity and derivative of circulation for particle p
float3 derivatives = eval_derivatives(p, old_particles, nr_particles);
// integrate trajectories and circulation
p.x += derivatives.x * dt;
p.y += derivatives.y * dt;
p.z += derivatives.z * dt;
// put the particle back in global memory
new_particles[pid] = p;
if (get_derivatives) {
new_derivatives[pid] = make_float4(derivatives.x, derivatives.y, derivatives.z, 0.0f);
}
}
}
void solve(std::vector<particle>& particles, std::vector<particle>& derivatives,
float dt, unsigned nr_iterations) {
unsigned N = particles.size();
thrust::host_vector<float4> ps_h(N);
thrust::transform(particles.begin(), particles.end(),
ps_h.begin(),
particle_to_float4());
thrust::device_vector<float4> ps_d[2] = { // I hate you, C++.
thrust::device_vector<float4>(N),
thrust::device_vector<float4>(N),
};
unsigned current_read = 0, current_write = 1;
thrust::copy(ps_h.begin(), ps_h.end(), ps_d[current_read].begin());
for (unsigned i = 0; i < nr_iterations; ++i) {
unsigned nr_blocks = static_cast<unsigned>(std::ceil(N / T));
integrate<false><<<nr_blocks, T, T * sizeof(float4)>>>(dt, N,
(float4*) thrust::raw_pointer_cast(&ps_d[current_read]),
(float4*) thrust::raw_pointer_cast(&ps_d[current_write]),
NULL);
cudaThreadSynchronize();
std::swap(current_read, current_write);
}
}
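// A minimal driver sketch for solve(): it seeds a ring of vortex particles and runs a
// hundred time steps. The particle members x, y and circ match the fields read by
// particle_to_float4 above; the helper name, ring layout, dt and iteration count are
// illustrative assumptions, not values taken from the original application.
static void vm_usage_sketch() {
    // hypothetical driver, not called anywhere in this file
    const unsigned N = 1024;
    std::vector<particle> particles(N), derivatives(N);
    for (unsigned i = 0; i < N; ++i) {
        float angle = 2.0f * M_PI * i / N;
        particles[i].x = std::cos(angle);   // unit circle of particles
        particles[i].y = std::sin(angle);
        particles[i].circ = 1.0f / N;       // unit total circulation
    }
    solve(particles, derivatives, 1e-3f, 100);
}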
|
3db585d9209e4de284c22461688f9cbd776e6da6.hip
|
// !!! This is a file automatically generated by hipify!!!
//#ifndef __HIPCC__
// #define __HIPCC__
//#endif
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_renderer.cuh"
#include "Vector3D.cuh"
#include "Matrix_hip.cuh"
#include "Color.cuh"
#include "Camera.cuh"
#include "IGeometry.cuh"
#include "Plane.cuh"
#include "Node.cuh"
#include "IShader.cuh"
#include "Lambert.cuh"
#include "Sphere.cuh"
#include "OrenNayar.cuh"
#include "Phong.cuh"
#include "Refraction.cuh"
#include "Transform.cuh"
#include "Reflection.cuh"
#include "Layered.cuh"
#include "Fresnel.cuh"
#include "CameraController.cuh"
#include "RaytracerControls.cuh"
#include "Settings.cuh"
#include "WaterWaves.cuh"
#include "Scene.cuh"
__device__
bool needsAA[VFB_MAX_SIZE * VFB_MAX_SIZE];
__device__
Color colorBuffer[VFB_MAX_SIZE * VFB_MAX_SIZE];
__device__
CameraController* controller;
__device__ Scene* scene;
__device__
bool testVisibility(const Vector& from, const Vector& to)
{
Ray ray;
ray.start = from;
ray.dir = to - from;
ray.dir.normalize();
IntersectionData temp;
temp.dist = (to - from).length();
for (int i = 0; i < scene->dev_nodes.size(); ++i)
{
if (scene->dev_nodes[i]->intersect(ray, temp))
{
return false;
}
}
return true;
}
__device__
Node* createNode(Geometry* geom, Shader* shader, Texture* tex)
{
scene->dev_geom.push_back(geom);
scene->dev_shaders.push_back(shader);
scene->dev_textures.push_back(tex);
Node* node = new Node(geom, shader, tex);
scene->dev_nodes.push_back(node);
return node;
}
__global__
void initializeScene(short sceneID, int RES_X, int RES_Y, bool realTime)
{
precomputeColorCache();
scene = new Scene;
scene->dev_cam = new Camera(0.0, 0.0, 0.0, 90.0, static_cast<float>(RES_X) / RES_Y);
scene->dev_cam->pos = Vector(0, 150, -100);
controller = new CameraController(*(scene->dev_cam), 100.f);
if (realTime)
{
scene->dev_lights.push_back(new PointLight(Vector(0, 296, 200), Color(1, 1, 1), 50000));
}
else
{
scene->dev_lights.push_back(new RectLight(Vector(0, 296, 200), Vector(0, 0, 0), Vector(50, 34, 34), Color(1, 1, 1), 20, 6, 6));
}
switch (sceneID)
{
case CORNELL_BOX:
{
Node* floor = createNode(new Plane(0, 150, 150), new Lambert(Color(0xF5E08C)));
floor->transform.translate(Vector(0, 0, 150));
floor->transform.scale(1, 1, 1);
Layered* mirror = new Layered;
mirror->addLayer(new Reflection(), Color(1, 1, 1), new Fresnel(10.0));
Node* BackWall = createNode(new Plane(0, 150, 150), new Lambert(Color(0xF5E08C)));
BackWall->transform.rotate(0, 90, 0);
BackWall->transform.translate(Vector(0, 150, 300));
BackWall->transform.scale(1, 1, 1);
Node* SideWallLeft = createNode(new Plane(0, 150, 150), new Lambert(Color(1.0, 0.0, 0.0)));
SideWallLeft->transform.rotate(0, 0, 90);
SideWallLeft->transform.translate(Vector(-150, 150, 150));
SideWallLeft->transform.scale(1, 1, 1);
Node* SideWallRight = createNode(new Plane(0, 150, 150), new Lambert(Color(0.0, 0.0, 1.0)));
SideWallRight->transform.rotate(0, 0, 90);
SideWallRight->transform.translate(Vector(150, 150, 150));
SideWallRight->transform.scale(1, 1, 1);
Node* Roof = createNode(new Plane(0, 150, 150), new Lambert(Color(0xF5E08C)));
Roof->transform.translate(Vector(0, 300, 150));
Roof->transform.scale(1, 1, 1);
Layered* moreGlossy = new Layered;
moreGlossy->addLayer(new Phong(Color(0.0, 0.0, 1.0), 32), Color(1.0, 1.0, 1.0));
moreGlossy->addLayer(new Reflection(Color(1.0, 1.0, 1.0)), Color(1, 1, 1), new Fresnel(1.8));
Node* ball = createNode(new Sphere(Vector(0, 0, 0), 40.0), moreGlossy);
ball->transform.translate(Vector(0, 50, 200));
Node* rectMirror = createNode(new Plane(0, 60, 80), mirror);
rectMirror->transform.rotate(0, 90, 0);
rectMirror->transform.translate(Vector(0, 120, 298));
rectMirror->transform.scale(1, 1, 1);
break;
}
case ROAMING:
{
Node* blueBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new Phong(Color(0, 0, 1), 32));
blueBall->transform.translate(Vector(0, 50, 200));
Node* redBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new Phong(Color(1, 0, 0), 32));
redBall->transform.translate(Vector(-120, 50, 200));
Node* greenBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new Phong(Color(0, 1, 0), 32));
greenBall->transform.translate(Vector(-240, 50, 200));
break;
}
case SEA:
{
scene->dev_cam->pos = Vector(0, 10, 200);
scene->dev_cam->yaw = 45.0;
// ocean floor
Node* oceanFloor = createNode(new Plane(0, 1000, 1000), new Lambert(Color(0xF5E08C))); // 0.1448, 0.4742, 0.6804 0x0AB6FF - blueish
oceanFloor->transform.translate(Vector(0, -300, 600));
oceanFloor->transform.scale(1, 1, 1);
Layered* water = new Layered;
water->addLayer(new Refraction(Color(0.9, 0.9, 0.9), 1.33), Color(1.0, 1.0, 1.0));
water->addLayer(new Reflection(Color(0x0AB6FF)), Color(1.0, 1.0, 1.0), new Fresnel(1.5));
Node* waterGeom = createNode(new Plane(0, 100, 100), water, new WaterWaves(0.3));
waterGeom->transform.scale(10, 1, 10);
waterGeom->transform.translate(Vector(0, 0, 600));
Node* island = createNode(new Sphere(Vector(0, 0, 0), 100.0), new Lambert(Color(0, 1, 0)));
island->transform.scale(8, 2, 7);
island->transform.translate(Vector(10, -20, 1000));
break;
}
case ROAMING_V2:
{
scene->dev_cam->pos = Vector(40, 140, 0);
scene->dev_cam->yaw = 40.0;
scene->dev_cam->pitch = -30.0;
//scene->dev_cam->roll = 15.0;
Layered* glossy = new Layered;
glossy->addLayer(new OrenNayar(Color(0.5, 0.5, 0.5), 1.0), Color(1.0, 1.0, 1.0));
glossy->addLayer(new Reflection(Color(1.0, 1.0, 1.0)), Color(1, 1, 1), new Fresnel(1.8));
Layered* mirror = new Layered;
mirror->addLayer(new Reflection(), Color(0.8, 0.8, 0.9), new Fresnel(10.0));
Node* floor = createNode(new Plane(0), glossy);
Node* blueBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new OrenNayar(Color(0.2, 0, 1), 1.0));
blueBall->transform.translate(Vector(0, 50, 200));
Node* refrBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new Refraction(Color(0.7, 0.9, 0.7), 2.3));
refrBall->transform.translate(Vector(-120, 50, 60));
Node* mirrorBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), mirror);
mirrorBall->transform.translate(Vector(-220, 50, 220));
}
default:
break;
}
scene->dev_cam->beginFrame();
}
__global__
void update(double elapsedTime, double currentTime)
{
scene->secondsElapsed = elapsedTime;
scene->waves = currentTime;
	// record which fraction of the current second we are at, a value in [0.0; 1.0),
	// and multiply it by PI, so we get a value in the interval [0.0; PI)
scene->timeInSecond = ((currentTime - elapsedTime) - static_cast<int>(currentTime - elapsedTime)) * PI;
}
extern "C"
void updateScene(const double& elapsedTime, const double& currentTime)
{
hipLaunchKernelGGL(( update), dim3(1), dim3(1), 0, 0, elapsedTime, currentTime);
}
__device__
Color BackgroundColor(const double& decay)
{
return Color(0.55f, 0.8f, 0.95f) * decay;
}
__device__
Color Fog(const double& decay)
{
return scene->fogColor * (1.0 - decay);
}
__device__
Color raytrace(Ray ray)
{
IntersectionData data;
Node* closestNode = nullptr;
if (ray.depth > MAX_RAY_DEPTH)
{
return Color(0, 0, 0);
}
data.dist = 1e99;
for (int i = 0; i < scene->dev_nodes.size(); ++i)
{
if (scene->dev_nodes[i]->intersect(ray, data))
{
closestNode = scene->dev_nodes[i];
}
}
// check if the closest intersection point is actually a light:
bool hitLight = false;
Color hitLightColor;
for (int i = 0; i < scene->dev_lights.size(); ++i)
{
if (scene->dev_lights[i]->intersect(ray, data.dist))
{
hitLight = true;
hitLightColor = scene->dev_lights[i]->getColor();
}
}
if (hitLight) return hitLightColor;
double expDecay;
if (scene->isFogActive)
{
double minDist = data.dist;
//scene->expDecay = pow(0.5, minDist / scene->fogDensity);
expDecay = pow(0.5, minDist / scene->fogDensity);
}
else
{
//scene->expDecay = 1.0;
expDecay = 1.0;
}
if (!closestNode)
{
return BackgroundColor(expDecay) + Fog(expDecay); // skyblue
}
if (closestNode->bumpTex != nullptr)
{
closestNode->bumpTex->modifyNormal(data);
}
if (closestNode == scene->selectedNode)
{
return closestNode->shader->shade(ray, data) * sin(scene->timeInSecond) * 1.5;
}
return closestNode->shader->shade(ray, data) * expDecay + Fog(expDecay);
}
/**
* @brief - The function checks if one of the red, green or blue components
 * of the colors a and b is too different.
* @return true - if the difference is bigger than the THRESHOLD
* @return false - if the difference is lower than the THRESHOLD
*/
__device__
inline bool tooDifferent(const Color& a, const Color& b)
{
const float THRESHOLD = 0.1; // max color threshold; if met on any of the three channels, consider the colors too different
for (int comp = 0; comp < 3; comp++) {
float theMax = dev_max(a[comp], b[comp]);
float theMin = dev_min(a[comp], b[comp]);
// compare a single channel of the two colors. If the difference between them is large,
// but they aren't overexposed, the difference will be visible: needs anti-aliasing.
if (theMax - theMin > THRESHOLD && theMin < 1.33f)
return true;
}
return false;
}
__global__
void toGrayscale(uchar4* dev_vfb, int RES_X, int RES_Y)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
dev_vfb[offset].x = convertTo8bit_sRGB_cached(colorBuffer[offset].intensityPerceptual());
dev_vfb[offset].y = convertTo8bit_sRGB_cached(colorBuffer[offset].intensityPerceptual());
dev_vfb[offset].z = convertTo8bit_sRGB_cached(colorBuffer[offset].intensityPerceptual());
}
__global__
void blurScene(uchar4* dev_vfb, int RES_X, int RES_Y)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
Color result = colorBuffer[offset];
// take just straight up-down and right-left neighbours
result += colorBuffer[(x > 0 ? x - 1 : x) + y * blockDim.x * gridDim.x] +
colorBuffer[(x + 1 < RES_X ? x + 1 : x) + y * blockDim.x * gridDim.x] +
colorBuffer[x + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x] +
colorBuffer[x + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x];
colorBuffer[offset] = result / 5.0f;
// take all neighbours (up-down, right-left and diagonals)
//result += colorBuffer[(x > 0 ? x - 1 : x) + y * blockDim.x * gridDim.x] +
// colorBuffer[(x > 0 ? x - 1 : x) + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[(x + 1 < RES_X ? x + 1 : x) + y * blockDim.x * gridDim.x] +
// colorBuffer[(x + 1 < RES_X ? x + 1 : x) + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[x + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[(x > 0 ? x - 1 : x) + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[x + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[(x < 0 ? x + 1 : x) + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x];
//
//colorBuffer[offset] = result / static_cast<float>(9.0);
dev_vfb[offset].x = convertTo8bit_sRGB_cached(colorBuffer[offset].r);
dev_vfb[offset].y = convertTo8bit_sRGB_cached(colorBuffer[offset].g);
dev_vfb[offset].z = convertTo8bit_sRGB_cached(colorBuffer[offset].b);
}
__global__
void antiAliasing(uchar4* dev_vfb, bool previewAA, int RES_X, int RES_Y)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
const int n_size = 5;
Color neighs[n_size];
neighs[0] = colorBuffer[offset];
neighs[1] = colorBuffer[(x > 0 ? x - 1 : x) + y * blockDim.x * gridDim.x];
neighs[2] = colorBuffer[(x + 1 < RES_X ? x + 1 : x) + y * blockDim.x * gridDim.x];
neighs[3] = colorBuffer[x + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x];
neighs[4] = colorBuffer[x + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x];
Color average(0, 0, 0);
for (int i = 0; i < n_size; ++i)
{
average += neighs[i];
}
average /= static_cast<float>(n_size);
for (int i = 0; i < n_size; ++i)
{
if (tooDifferent(neighs[i], average))
{
needsAA[offset] = true;
break;
}
}
const double kernel[5][2] = {
{ 0, 0 },
{ 0.3, 0.3 },
{ 0.6, 0 },
{ 0, 0.6 },
{ 0.6, 0.6 },
};
if (previewAA)
{
if (needsAA[offset])
{
dev_vfb[offset].x = 255;
dev_vfb[offset].y = 0;
dev_vfb[offset].z = 0;
}
}
else
{
if (needsAA[offset])
{
Color result = colorBuffer[offset];
for (int i = 1; i < n_size; ++i)
{
result += raytrace(scene->dev_cam->getScreenRay(x + kernel[i][0], y + kernel[i][1], RES_X, RES_Y));
}
colorBuffer[offset] = result / static_cast<float>(n_size);
dev_vfb[offset].x = convertTo8bit_sRGB_cached(colorBuffer[offset].r);
dev_vfb[offset].y = convertTo8bit_sRGB_cached(colorBuffer[offset].g);
dev_vfb[offset].z = convertTo8bit_sRGB_cached(colorBuffer[offset].b);
}
}
}
__global__
void renderScene(uchar4* dev_vfb, int RES_X, int RES_Y)
{
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
if (offset < RES_X * RES_Y)
{
colorBuffer[offset] = raytrace(scene->dev_cam->getScreenRay(x, y, RES_X, RES_Y));
dev_vfb[offset].x = convertTo8bit_sRGB_cached(colorBuffer[offset].r);
dev_vfb[offset].y = convertTo8bit_sRGB_cached(colorBuffer[offset].g);
dev_vfb[offset].z = convertTo8bit_sRGB_cached(colorBuffer[offset].b);
}
}
__global__
void freeMemory()
{
delete controller;
delete scene;
}
/**
* Wrapper kernel function
*/
extern "C"
void initScene()
{
hipLaunchKernelGGL(( initializeScene), dim3(1), dim3(1), 0, 0, GlobalSettings::sceneID,
GlobalSettings::RES_X, GlobalSettings::RES_Y,
GlobalSettings::realTime);
}
__global__
void camBeginFrame()
{
scene->dev_cam->beginFrame();
}
extern "C"
void cameraBeginFrame()
{
hipLaunchKernelGGL(( camBeginFrame), dim3(1), dim3(1), 0, 0, );
}
extern "C"
void cudaRenderer(uchar4* dev_vfb)
{
dim3 THREADS_PER_BLOCK(4, 8); // 4*8 - most optimal; 32*32 = 1024 (max threads per block supported)
dim3 BLOCKS(GlobalSettings::RES_X / THREADS_PER_BLOCK.x, GlobalSettings::RES_Y / THREADS_PER_BLOCK.y);
// first pass
hipLaunchKernelGGL(( renderScene), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_vfb, GlobalSettings::RES_X, GlobalSettings::RES_Y);
if (GlobalSettings::AAEnabled)
{
//second pass
hipLaunchKernelGGL(( antiAliasing), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_vfb, GlobalSettings::previewAA, GlobalSettings::RES_X, GlobalSettings::RES_Y);
}
if (GlobalSettings::blur)
{
hipLaunchKernelGGL(( blurScene), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_vfb, GlobalSettings::RES_X, GlobalSettings::RES_Y);
}
if (GlobalSettings::grayscale)
{
hipLaunchKernelGGL(( toGrayscale), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_vfb, GlobalSettings::RES_X, GlobalSettings::RES_Y);
}
}
extern "C"
void freeDeviceMemory()
{
hipLaunchKernelGGL(( freeMemory), dim3(1), dim3(1), 0, 0, );
}
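// A minimal host-side frame-loop sketch wiring together the extern "C" entry points
// above. The helper name, hipMalloc-ed framebuffer, fixed 60-frame loop and per-frame
// timing values are illustrative assumptions; the real application drives these from
// its windowing / OpenGL interop loop instead.
static void renderLoopSketch() {
	// hypothetical driver, not called anywhere in this file
	uchar4* dev_vfb = nullptr;
	hipMalloc((void**)&dev_vfb,
	          GlobalSettings::RES_X * GlobalSettings::RES_Y * sizeof(uchar4));
	initScene();
	for (int frame = 0; frame < 60; ++frame) {
		cameraBeginFrame();                    // refresh the camera's frame basis
		updateScene(1.0 / 60.0, frame / 60.0); // elapsed and current time, in seconds
		cudaRenderer(dev_vfb);                 // raytrace + optional AA / blur / grayscale passes
		hipDeviceSynchronize();                // finish the frame before it is consumed
	}
	freeDeviceMemory();
	hipFree(dev_vfb);
}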
|
3db585d9209e4de284c22461688f9cbd776e6da6.cu
|
//#ifndef __CUDACC__
// #define __CUDACC__
//#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_renderer.cuh"
#include "Vector3D.cuh"
#include "Matrix.cuh"
#include "Color.cuh"
#include "Camera.cuh"
#include "IGeometry.cuh"
#include "Plane.cuh"
#include "Node.cuh"
#include "IShader.cuh"
#include "Lambert.cuh"
#include "Sphere.cuh"
#include "OrenNayar.cuh"
#include "Phong.cuh"
#include "Refraction.cuh"
#include "Transform.cuh"
#include "Reflection.cuh"
#include "Layered.cuh"
#include "Fresnel.cuh"
#include "CameraController.cuh"
#include "RaytracerControls.cuh"
#include "Settings.cuh"
#include "WaterWaves.cuh"
#include "Scene.cuh"
__device__
bool needsAA[VFB_MAX_SIZE * VFB_MAX_SIZE];
__device__
Color colorBuffer[VFB_MAX_SIZE * VFB_MAX_SIZE];
__device__
CameraController* controller;
__device__ Scene* scene;
__device__
bool testVisibility(const Vector& from, const Vector& to)
{
Ray ray;
ray.start = from;
ray.dir = to - from;
ray.dir.normalize();
IntersectionData temp;
temp.dist = (to - from).length();
for (int i = 0; i < scene->dev_nodes.size(); ++i)
{
if (scene->dev_nodes[i]->intersect(ray, temp))
{
return false;
}
}
return true;
}
__device__
Node* createNode(Geometry* geom, Shader* shader, Texture* tex)
{
scene->dev_geom.push_back(geom);
scene->dev_shaders.push_back(shader);
scene->dev_textures.push_back(tex);
Node* node = new Node(geom, shader, tex);
scene->dev_nodes.push_back(node);
return node;
}
__global__
void initializeScene(short sceneID, int RES_X, int RES_Y, bool realTime)
{
precomputeColorCache();
scene = new Scene;
scene->dev_cam = new Camera(0.0, 0.0, 0.0, 90.0, static_cast<float>(RES_X) / RES_Y);
scene->dev_cam->pos = Vector(0, 150, -100);
controller = new CameraController(*(scene->dev_cam), 100.f);
if (realTime)
{
scene->dev_lights.push_back(new PointLight(Vector(0, 296, 200), Color(1, 1, 1), 50000));
}
else
{
scene->dev_lights.push_back(new RectLight(Vector(0, 296, 200), Vector(0, 0, 0), Vector(50, 34, 34), Color(1, 1, 1), 20, 6, 6));
}
switch (sceneID)
{
case CORNELL_BOX:
{
Node* floor = createNode(new Plane(0, 150, 150), new Lambert(Color(0xF5E08C)));
floor->transform.translate(Vector(0, 0, 150));
floor->transform.scale(1, 1, 1);
Layered* mirror = new Layered;
mirror->addLayer(new Reflection(), Color(1, 1, 1), new Fresnel(10.0));
Node* BackWall = createNode(new Plane(0, 150, 150), new Lambert(Color(0xF5E08C)));
BackWall->transform.rotate(0, 90, 0);
BackWall->transform.translate(Vector(0, 150, 300));
BackWall->transform.scale(1, 1, 1);
Node* SideWallLeft = createNode(new Plane(0, 150, 150), new Lambert(Color(1.0, 0.0, 0.0)));
SideWallLeft->transform.rotate(0, 0, 90);
SideWallLeft->transform.translate(Vector(-150, 150, 150));
SideWallLeft->transform.scale(1, 1, 1);
Node* SideWallRight = createNode(new Plane(0, 150, 150), new Lambert(Color(0.0, 0.0, 1.0)));
SideWallRight->transform.rotate(0, 0, 90);
SideWallRight->transform.translate(Vector(150, 150, 150));
SideWallRight->transform.scale(1, 1, 1);
Node* Roof = createNode(new Plane(0, 150, 150), new Lambert(Color(0xF5E08C)));
Roof->transform.translate(Vector(0, 300, 150));
Roof->transform.scale(1, 1, 1);
Layered* moreGlossy = new Layered;
moreGlossy->addLayer(new Phong(Color(0.0, 0.0, 1.0), 32), Color(1.0, 1.0, 1.0));
moreGlossy->addLayer(new Reflection(Color(1.0, 1.0, 1.0)), Color(1, 1, 1), new Fresnel(1.8));
Node* ball = createNode(new Sphere(Vector(0, 0, 0), 40.0), moreGlossy);
ball->transform.translate(Vector(0, 50, 200));
Node* rectMirror = createNode(new Plane(0, 60, 80), mirror);
rectMirror->transform.rotate(0, 90, 0);
rectMirror->transform.translate(Vector(0, 120, 298));
rectMirror->transform.scale(1, 1, 1);
break;
}
case ROAMING:
{
Node* blueBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new Phong(Color(0, 0, 1), 32));
blueBall->transform.translate(Vector(0, 50, 200));
Node* redBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new Phong(Color(1, 0, 0), 32));
redBall->transform.translate(Vector(-120, 50, 200));
Node* greenBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new Phong(Color(0, 1, 0), 32));
greenBall->transform.translate(Vector(-240, 50, 200));
break;
}
case SEA:
{
scene->dev_cam->pos = Vector(0, 10, 200);
scene->dev_cam->yaw = 45.0;
// ocean floor
Node* oceanFloor = createNode(new Plane(0, 1000, 1000), new Lambert(Color(0xF5E08C))); // 0.1448, 0.4742, 0.6804 0x0AB6FF - blueish
oceanFloor->transform.translate(Vector(0, -300, 600));
oceanFloor->transform.scale(1, 1, 1);
Layered* water = new Layered;
water->addLayer(new Refraction(Color(0.9, 0.9, 0.9), 1.33), Color(1.0, 1.0, 1.0));
water->addLayer(new Reflection(Color(0x0AB6FF)), Color(1.0, 1.0, 1.0), new Fresnel(1.5));
Node* waterGeom = createNode(new Plane(0, 100, 100), water, new WaterWaves(0.3));
waterGeom->transform.scale(10, 1, 10);
waterGeom->transform.translate(Vector(0, 0, 600));
Node* island = createNode(new Sphere(Vector(0, 0, 0), 100.0), new Lambert(Color(0, 1, 0)));
island->transform.scale(8, 2, 7);
island->transform.translate(Vector(10, -20, 1000));
break;
}
case ROAMING_V2:
{
scene->dev_cam->pos = Vector(40, 140, 0);
scene->dev_cam->yaw = 40.0;
scene->dev_cam->pitch = -30.0;
//scene->dev_cam->roll = 15.0;
Layered* glossy = new Layered;
glossy->addLayer(new OrenNayar(Color(0.5, 0.5, 0.5), 1.0), Color(1.0, 1.0, 1.0));
glossy->addLayer(new Reflection(Color(1.0, 1.0, 1.0)), Color(1, 1, 1), new Fresnel(1.8));
Layered* mirror = new Layered;
mirror->addLayer(new Reflection(), Color(0.8, 0.8, 0.9), new Fresnel(10.0));
Node* floor = createNode(new Plane(0), glossy);
Node* blueBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new OrenNayar(Color(0.2, 0, 1), 1.0));
blueBall->transform.translate(Vector(0, 50, 200));
Node* refrBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), new Refraction(Color(0.7, 0.9, 0.7), 2.3));
refrBall->transform.translate(Vector(-120, 50, 60));
Node* mirrorBall = createNode(new Sphere(Vector(0, 0, 0), 40.0), mirror);
mirrorBall->transform.translate(Vector(-220, 50, 220));
}
default:
break;
}
scene->dev_cam->beginFrame();
}
__global__
void update(double elapsedTime, double currentTime)
{
scene->secondsElapsed = elapsedTime;
scene->waves = currentTime;
	// record which fraction of the current second we are at, a value in [0.0; 1.0),
	// and multiply it by PI, so we get a value in the interval [0.0; PI)
scene->timeInSecond = ((currentTime - elapsedTime) - static_cast<int>(currentTime - elapsedTime)) * PI;
}
extern "C"
void updateScene(const double& elapsedTime, const double& currentTime)
{
update<<<1, 1>>>(elapsedTime, currentTime);
}
__device__
Color BackgroundColor(const double& decay)
{
return Color(0.55f, 0.8f, 0.95f) * decay;
}
__device__
Color Fog(const double& decay)
{
return scene->fogColor * (1.0 - decay);
}
__device__
Color raytrace(Ray ray)
{
IntersectionData data;
Node* closestNode = nullptr;
if (ray.depth > MAX_RAY_DEPTH)
{
return Color(0, 0, 0);
}
data.dist = 1e99;
for (int i = 0; i < scene->dev_nodes.size(); ++i)
{
if (scene->dev_nodes[i]->intersect(ray, data))
{
closestNode = scene->dev_nodes[i];
}
}
// check if the closest intersection point is actually a light:
bool hitLight = false;
Color hitLightColor;
for (int i = 0; i < scene->dev_lights.size(); ++i)
{
if (scene->dev_lights[i]->intersect(ray, data.dist))
{
hitLight = true;
hitLightColor = scene->dev_lights[i]->getColor();
}
}
if (hitLight) return hitLightColor;
double expDecay;
if (scene->isFogActive)
{
double minDist = data.dist;
//scene->expDecay = pow(0.5, minDist / scene->fogDensity);
expDecay = pow(0.5, minDist / scene->fogDensity);
}
else
{
//scene->expDecay = 1.0;
expDecay = 1.0;
}
if (!closestNode)
{
return BackgroundColor(expDecay) + Fog(expDecay); // skyblue
}
if (closestNode->bumpTex != nullptr)
{
closestNode->bumpTex->modifyNormal(data);
}
if (closestNode == scene->selectedNode)
{
return closestNode->shader->shade(ray, data) * sin(scene->timeInSecond) * 1.5;
}
return closestNode->shader->shade(ray, data) * expDecay + Fog(expDecay);
}
/**
* @brief - The function checks if one of the red, green or blue components
 * of the colors a and b is too different.
* @return true - if the difference is bigger than the THRESHOLD
* @return false - if the difference is lower than the THRESHOLD
*/
__device__
inline bool tooDifferent(const Color& a, const Color& b)
{
const float THRESHOLD = 0.1; // max color threshold; if met on any of the three channels, consider the colors too different
for (int comp = 0; comp < 3; comp++) {
float theMax = dev_max(a[comp], b[comp]);
float theMin = dev_min(a[comp], b[comp]);
// compare a single channel of the two colors. If the difference between them is large,
// but they aren't overexposed, the difference will be visible: needs anti-aliasing.
if (theMax - theMin > THRESHOLD && theMin < 1.33f)
return true;
}
return false;
}
__global__
void toGrayscale(uchar4* dev_vfb, int RES_X, int RES_Y)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
dev_vfb[offset].x = convertTo8bit_sRGB_cached(colorBuffer[offset].intensityPerceptual());
dev_vfb[offset].y = convertTo8bit_sRGB_cached(colorBuffer[offset].intensityPerceptual());
dev_vfb[offset].z = convertTo8bit_sRGB_cached(colorBuffer[offset].intensityPerceptual());
}
__global__
void blurScene(uchar4* dev_vfb, int RES_X, int RES_Y)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
Color result = colorBuffer[offset];
// take just straight up-down and right-left neighbours
result += colorBuffer[(x > 0 ? x - 1 : x) + y * blockDim.x * gridDim.x] +
colorBuffer[(x + 1 < RES_X ? x + 1 : x) + y * blockDim.x * gridDim.x] +
colorBuffer[x + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x] +
colorBuffer[x + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x];
colorBuffer[offset] = result / 5.0f;
// take all neighbours (up-down, right-left and diagonals)
//result += colorBuffer[(x > 0 ? x - 1 : x) + y * blockDim.x * gridDim.x] +
// colorBuffer[(x > 0 ? x - 1 : x) + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[(x + 1 < RES_X ? x + 1 : x) + y * blockDim.x * gridDim.x] +
// colorBuffer[(x + 1 < RES_X ? x + 1 : x) + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[x + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[(x > 0 ? x - 1 : x) + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[x + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x] +
// colorBuffer[(x < 0 ? x + 1 : x) + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x];
//
//colorBuffer[offset] = result / static_cast<float>(9.0);
dev_vfb[offset].x = convertTo8bit_sRGB_cached(colorBuffer[offset].r);
dev_vfb[offset].y = convertTo8bit_sRGB_cached(colorBuffer[offset].g);
dev_vfb[offset].z = convertTo8bit_sRGB_cached(colorBuffer[offset].b);
}
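// Adaptive anti-aliasing in two stages: flag pixels whose colour deviates noticeably from the average
// of the pixel and its four direct neighbours, then re-shoot four extra rays per flagged pixel at the
// sub-pixel offsets in `kernel` below and average the five samples.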
__global__
void antiAliasing(uchar4* dev_vfb, bool previewAA, int RES_X, int RES_Y)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
const int n_size = 5;
Color neighs[n_size];
neighs[0] = colorBuffer[offset];
neighs[1] = colorBuffer[(x > 0 ? x - 1 : x) + y * blockDim.x * gridDim.x];
neighs[2] = colorBuffer[(x + 1 < RES_X ? x + 1 : x) + y * blockDim.x * gridDim.x];
neighs[3] = colorBuffer[x + (y > 0 ? y - 1 : y) * blockDim.x * gridDim.x];
neighs[4] = colorBuffer[x + (y + 1 < RES_Y ? y + 1 : y) * blockDim.x * gridDim.x];
Color average(0, 0, 0);
for (int i = 0; i < n_size; ++i)
{
average += neighs[i];
}
average /= static_cast<float>(n_size);
for (int i = 0; i < n_size; ++i)
{
if (tooDifferent(neighs[i], average))
{
needsAA[offset] = true;
break;
}
}
const double kernel[5][2] = {
{ 0, 0 },
{ 0.3, 0.3 },
{ 0.6, 0 },
{ 0, 0.6 },
{ 0.6, 0.6 },
};
if (previewAA)
{
if (needsAA[offset])
{
dev_vfb[offset].x = 255;
dev_vfb[offset].y = 0;
dev_vfb[offset].z = 0;
}
}
else
{
if (needsAA[offset])
{
Color result = colorBuffer[offset];
for (int i = 1; i < n_size; ++i)
{
result += raytrace(scene->dev_cam->getScreenRay(x + kernel[i][0], y + kernel[i][1], RES_X, RES_Y));
}
colorBuffer[offset] = result / static_cast<float>(n_size);
dev_vfb[offset].x = convertTo8bit_sRGB_cached(colorBuffer[offset].r);
dev_vfb[offset].y = convertTo8bit_sRGB_cached(colorBuffer[offset].g);
dev_vfb[offset].z = convertTo8bit_sRGB_cached(colorBuffer[offset].b);
}
}
}
__global__
void renderScene(uchar4* dev_vfb, int RES_X, int RES_Y)
{
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
if (offset < RES_X * RES_Y)
{
colorBuffer[offset] = raytrace(scene->dev_cam->getScreenRay(x, y, RES_X, RES_Y));
dev_vfb[offset].x = convertTo8bit_sRGB_cached(colorBuffer[offset].r);
dev_vfb[offset].y = convertTo8bit_sRGB_cached(colorBuffer[offset].g);
dev_vfb[offset].z = convertTo8bit_sRGB_cached(colorBuffer[offset].b);
}
}
__global__
void freeMemory()
{
delete controller;
delete scene;
}
/**
* Wrapper kernel function
*/
extern "C"
void initScene()
{
initializeScene<<<1, 1>>>(GlobalSettings::sceneID,
GlobalSettings::RES_X, GlobalSettings::RES_Y,
GlobalSettings::realTime);
}
__global__
void camBeginFrame()
{
scene->dev_cam->beginFrame();
}
extern "C"
void cameraBeginFrame()
{
camBeginFrame<<<1, 1>>>();
}
extern "C"
void cudaRenderer(uchar4* dev_vfb)
{
dim3 THREADS_PER_BLOCK(4, 8); // 4*8 - most optimal; 32*32 = 1024 (max threads per block supported)
dim3 BLOCKS(GlobalSettings::RES_X / THREADS_PER_BLOCK.x, GlobalSettings::RES_Y / THREADS_PER_BLOCK.y);
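// note: the integer division assumes RES_X and RES_Y are multiples of the block dimensions; otherwise the rightmost/bottom pixels would never be scheduled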
// first pass
renderScene<<<BLOCKS, THREADS_PER_BLOCK>>>(dev_vfb, GlobalSettings::RES_X, GlobalSettings::RES_Y);
if (GlobalSettings::AAEnabled)
{
//second pass
antiAliasing<<<BLOCKS, THREADS_PER_BLOCK>>>(dev_vfb, GlobalSettings::previewAA, GlobalSettings::RES_X, GlobalSettings::RES_Y);
}
if (GlobalSettings::blur)
{
blurScene<<<BLOCKS, THREADS_PER_BLOCK>>>(dev_vfb, GlobalSettings::RES_X, GlobalSettings::RES_Y);
}
if (GlobalSettings::grayscale)
{
toGrayscale<<<BLOCKS, THREADS_PER_BLOCK>>>(dev_vfb, GlobalSettings::RES_X, GlobalSettings::RES_Y);
}
}
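// Hypothetical host-side call order for these wrappers (a sketch, not taken from the original host code;
// dev_vfb is assumed to be a device uchar4 framebuffer of RES_X * RES_Y pixels):
// initScene(); // once, builds the scene on the device
// while (running) {
// updateScene(elapsedTime, currentTime);
// cameraBeginFrame();
// cudaRenderer(dev_vfb);
// }
// freeDeviceMemory(); // once, at shutdown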
extern "C"
void freeDeviceMemory()
{
freeMemory<<<1, 1>>>();
}
|
54500e52c55923fa0f35663988520e0926bc6a5c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <functional>
#include <numeric>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#endif
using namespace std;
using namespace thrust::placeholders;
unsigned long long int total_count = 0;
unsigned int total_segments = 0;
unsigned int total_max;
unsigned int process_count;
map <unsigned int, unsigned int> str_offset;
long long int totalRecs = 0;
bool fact_file_loaded = 0;
char map_check;
void* d_v = NULL;
void* s_v = NULL;
unsigned int oldCount;
queue<string> op_type;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<string> col_aliases;
void* alloced_tmp;
unsigned int alloced_sz = 0;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
struct is_match
{
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return !(((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
__host__ __device__
float_type operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct float_to_decimal
{
__host__ __device__
float_type operator()(const float_type x)
{
return (int_type)(x*100);
}
};
struct to_zero
{
__host__ __device__
bool operator()(const int_type x)
{
if(x == -1)
return 0;
else
return 1;
}
};
struct div_long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x, const float_type y)
{
return (float_type)x/y;
}
};
struct long_to_float
{
__host__ __device__
float_type operator()(const long long int x)
{
return (((float_type)x)/100.0);
}
};
// trim from start
static inline std::string <rim(std::string &s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
return s;
}
// trim from end
static inline std::string &rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
return s;
}
// trim from both ends
static inline std::string &trim(std::string &s) {
return ltrim(rtrim(s));
}
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void write_compressed_char(string file_name, unsigned int index, unsigned int mCount);
unsigned int largest_prm(CudaSet* a);
unsigned int max_tmp(CudaSet* a);
unsigned int curr_segment = 10000000;
size_t getFreeMem();
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, unsigned int segment);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
keep = false;
partial_load = 0;
source = 1;
text_source = 1;
grp = NULL;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
keep = false;
partial_load = 1;
source = 1;
text_source = 0;
grp = NULL;
};
CudaSet::CudaSet(unsigned int RecordCount, unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b,Recs, op_sel, op_sel_as);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(unsigned int colIndex, unsigned int RecordCount)
{
if (type[colIndex] == 0) {
d_columns_int[type_index[colIndex]].resize(RecordCount);
}
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(RecordCount);
else {
void* d;
hipMalloc(&d, char_size[type_index[colIndex]]*RecordCount);
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
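// Reads one compressed char segment from disk, hashes every dictionary entry with MurmurHash64A,
// decodes the bit-packed per-row dictionary indices on the GPU and gathers the corresponding 64-bit
// hashes into d_columns_int[i_cnt], honouring the segment's prm row filter when one is present.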
void CudaSet::decompress_char_hash(unsigned int colIndex, unsigned int segment, unsigned int i_cnt)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count, old_count;
const unsigned int len = char_size[type_index[colIndex]];
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
strcat(f1,".");
itoaa(segment,col_pos);
strcat(f1,col_pos);
FILE* f;
f = fopen (f1 , "rb" );
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
unsigned long long int* hashes = new unsigned long long int[sz];
for(unsigned int i = 0; i < sz ; i++) {
hashes[i] = MurmurHash64A(&d_array[i*len], len, hash_seed); // 64-bit hash of the i-th dictionary entry
};
void* d;
hipMalloc((void **) &d, sz*int_size);
hipMemcpy( d, (void *) hashes, sz*8, hipMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> dd_int((unsigned long long int*)d);
delete[] d_array;
delete[] hashes;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
hipMalloc((void **) &d_val, vals_count*8);
hipMemcpy(d_val, (void *) int_array, vals_count*8, hipMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> mval((unsigned long long int*)d_val);
delete[] int_array;
void* d_int;
hipMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v;
hipMalloc((void **) &d_v, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<long long int> dd_int((long long int*)d);
thrust::device_ptr<unsigned int> dd_val((unsigned int*)d_int);
if(!prm.empty()) {
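// prm_index 'R': the segment carries a row filter -- gather only the rows listed in prm[segment];
// prm_index 'A': append every decompressed row of the segment as-is.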
if(prm_index[segment] == 'R') {
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
hipMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], hipMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
};
hipFree(d);
hipFree(d_val);
hipFree(d_v);
hipFree(d_int);
};
// takes a char column , hashes strings, copies them to a gpu
void CudaSet::add_hashed_strings(string field, unsigned int segment, unsigned int i_cnt)
{
unsigned int colInd2 = columnNames.find(field)->second;
CudaSet *t = varNames[setMap[field]];
if(not_compressed) { // decompressed strings on a host
unsigned int old_count;
unsigned long long int* hashes = new unsigned long long int[t->mRecCount];
for(unsigned int i = 0; i < t->mRecCount ; i++)
hashes[i] = MurmurHash64A(t->h_columns_char[t->type_index[colInd2]] + i*t->char_size[t->type_index[colInd2]], t->char_size[t->type_index[colInd2]], hash_seed);
if(!prm.empty()) {
if(prm_index[segment] == 'R') {
thrust::device_ptr<unsigned long long int> d_tmp = thrust::device_malloc<unsigned long long int>(t->mRecCount);
thrust::copy(hashes, hashes+mRecCount, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
hipMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], hipMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
delete [] hashes;
}
else { // hash the dictionary
decompress_char_hash(colInd2, segment, i_cnt);
};
};
void CudaSet::resize(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
if (mRecCount > prealloc_char_size) {
prealloc_char_size = mRecCount;
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], (unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]);
};
}
else {
h_columns_char[type_index[i]] = new char[(unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]];
};
};
};
};
void CudaSet::reserve(unsigned int Recs)
{
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0)
h_columns_int[type_index[i]].reserve(Recs);
else if(type[i] == 1)
h_columns_float[type_index[i]].reserve(Recs);
else {
unsigned long long int sz = (unsigned long long int)Recs*(unsigned long long int)char_size[type_index[i]];
h_columns_char[type_index[i]] = new char[sz];
if(h_columns_char[type_index[i]] == NULL) {
cout << "Could not allocate on a host " << Recs << " records of size " << char_size[type_index[i]] << endl;
exit(0);
};
prealloc_char_size = Recs;
};
};
};
void CudaSet::deAllocColumnOnDevice(unsigned int colIndex)
{
if (type[colIndex] == 0 && !d_columns_int.empty()) {
d_columns_int[type_index[colIndex]].resize(0);
d_columns_int[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 1 && !d_columns_float.empty()) {
d_columns_float[type_index[colIndex]].resize(0);
d_columns_float[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 2 && d_columns_char[type_index[colIndex]] != NULL) {
hipFree(d_columns_char[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = NULL;
};
};
void CudaSet::allocOnDevice(unsigned int RecordCount)
{
for(unsigned int i=0; i < mColumnCount; i++)
allocColumnOnDevice(i, RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i <mColumnCount; i++)
deAllocColumnOnDevice(i);
if(!columnGroups.empty() && mRecCount !=0) {
hipFree(grp);
grp = NULL;
};
if(!prm.empty()) { // free the sources
string some_field;
map<string,int>::iterator it=columnNames.begin();
some_field = (*it).first;
if(setMap[some_field].compare(name)) {
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(unsigned int RecCount, unsigned int colIndex)
{
if (RecCount) {
if (type[colIndex] == 0)
d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
else {
if (d_columns_char[type_index[colIndex]] != NULL)
hipFree(d_columns_char[type_index[colIndex]]);
void *d;
hipMalloc((void **) &d, (mRecCount+RecCount)*char_size[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
};
void CudaSet::resizeDevice(unsigned int RecCount)
{
if (RecCount)
for(unsigned int i=0; i < mColumnCount; i++)
resizeDeviceColumn(RecCount, i);
};
bool CudaSet::onDevice(unsigned int i)
{
unsigned j = type_index[i];
if (type[i] == 0) {
if (d_columns_int.empty())
return 0;
if (d_columns_int[j].size() == 0)
return 0;
}
else if (type[i] == 1) {
if (d_columns_float.empty())
return 0;
if(d_columns_float[j].size() == 0)
return 0;
}
else if (type[i] == 2) {
if(d_columns_char.empty())
return 0;
if(d_columns_char[j] == NULL)
return 0;
};
return 1;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if(a->type[i] == 0) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
a->type_index[i] = a->d_columns_int.size()-1;
}
else if(a->type[i] == 1) {
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
a->type_index[i] = a->d_columns_float.size()-1;
a->decimal[i] = decimal[i];
}
else {
a->h_columns_char.push_back(NULL);
a->d_columns_char.push_back(NULL);
a->type_index[i] = a->d_columns_char.size()-1;
};
};
a->char_size = char_size;
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
unsigned long long int CudaSet::readSegmentsFromFile(unsigned int segNum, unsigned int colIndex)
{
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
unsigned int cnt;
strcat(f1,".");
itoaa(segNum,col_pos);
strcat(f1,col_pos);
FILE* f;
f = fopen (f1 , "rb" );
if(type[colIndex] == 0) {
fread(h_columns_int[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[0];
fread((unsigned int*)(h_columns_int[type_index[colIndex]].data()) + 1, (cnt+8)*8 - 4, 1, f);
}
else if(type[colIndex] == 1) {
fread(h_columns_float[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_float[type_index[colIndex]].data()))[0];
fread((unsigned int*)(h_columns_float[type_index[colIndex]].data()) + 1, (cnt+8)*8 - 4, 1, f);
}
else {
decompress_char(f, colIndex, segNum);
};
fclose(f);
return 0;
};
void CudaSet::decompress_char(FILE* f, unsigned int colIndex, unsigned int segNum)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
const unsigned int len = char_size[type_index[colIndex]];
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
void* d;
hipMalloc((void **) &d, sz*len);
hipMemcpy( d, (void *) d_array, sz*len, hipMemcpyHostToDevice);
delete[] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
hipMalloc((void **) &d_val, vals_count*8);
hipMemcpy(d_val, (void *) int_array, vals_count*8, hipMemcpyHostToDevice);
delete[] int_array;
void* d_int;
hipMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v;
hipMalloc((void **) &d_v, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<unsigned int> dd_r((unsigned int*)d_int);
//for(int z = 0 ; z < 3; z++)
//cout << "DD " << dd_r[z] << endl;
//void* d_char;
//hipMalloc((void **) &d_char, real_count*len);
//hipMemset(d_char, 0, real_count*len);
//str_gather(d_int, real_count, d, d_char, len);
if(str_offset.count(colIndex) == 0)
str_offset[colIndex] = 0;
//cout << "str off " << str_offset[colIndex] << endl;
if(!alloced_switch)
str_gather(d_int, real_count, d, d_columns_char[type_index[colIndex]] + str_offset[colIndex]*len, len);
else
str_gather(d_int, real_count, d, alloced_tmp, len);
if(!prm.empty()) {
str_offset[colIndex] = str_offset[colIndex] + prm_count[segNum];
}
else {
str_offset[colIndex] = str_offset[colIndex] + real_count;
};
//if(d_columns_char[type_index[colIndex]])
// hipFree(d_columns_char[type_index[colIndex]]);
//d_columns_char[type_index[colIndex]] = (char*)d_char;
mRecCount = real_count;
hipFree(d);
hipFree(d_val);
hipFree(d_v);
hipFree(d_int);
}
void CudaSet::CopyToGpu(unsigned int offset, unsigned int count)
{
if (not_compressed) {
for(unsigned int i = 0; i < mColumnCount; i++) {
switch(type[i]) {
case 0 :
thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + count, d_columns_int[type_index[i]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + count, d_columns_float[type_index[i]].begin());
break;
default :
hipMemcpy(d_columns_char[type_index[i]], h_columns_char[type_index[i]], char_size[type_index[i]]*(offset + count), hipMemcpyHostToDevice);
};
};
}
else
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToGpu(i, offset, count);
};
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int segment)
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
if(!alloced_switch)
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_col);
};
break;
case 1 :
if(!alloced_switch)
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_col);
};
break;
default :
if(!alloced_switch)
hipMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
else
hipMemcpy(alloced_tmp, h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
};
}
else {
unsigned long long int data_offset;
if (partial_load)
data_offset = readSegmentsFromFile(segment,colIndex);
if(type[colIndex] != 2) {
if(d_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
};
if(type[colIndex] == 0) {
if(!alloced_switch) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
}
else {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
};
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
if(!alloced_switch) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()) , h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
}
else {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
}
//else // uncompressed float
//hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
hipMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
};
}
else {
long long int data_offset;
unsigned long long int totalRecs = 0;
if(d_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
str_offset[colIndex] = 0;
for(unsigned int i = 0; i < segCount; i++) {
if (partial_load)
data_offset = readSegmentsFromFile(i,colIndex);
if(type[colIndex] == 0) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totalRecs), h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs) , h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totalRecs, long_to_float());
}
// else uncompressed float
//hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
};
totalRecs = totalRecs + mRecCount;
};
mRecCount = totalRecs;
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int offset, unsigned int count)
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + offset + count, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + offset + count, d_columns_float[type_index[colIndex]].begin());
break;
default :
hipMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*(offset + count), hipMemcpyHostToDevice);
};
}
else {
};
}
void CudaSet::CopyColumnToHost(int colIndex, unsigned int offset, unsigned int RecCount)
{
switch(type[colIndex]) {
case 0 :
thrust::copy(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + RecCount, h_columns_int[type_index[colIndex]].begin() + offset);
break;
case 1 :
thrust::copy(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + RecCount, h_columns_float[type_index[colIndex]].begin() + offset);
break;
default :
hipMemcpy(h_columns_char[type_index[colIndex]] + offset*char_size[type_index[colIndex]], d_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*RecCount, hipMemcpyDeviceToHost);
}
}
void CudaSet::CopyColumnToHost(int colIndex)
{
CopyColumnToHost(colIndex, 0, mRecCount);
}
void CudaSet::CopyToHost(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++) {
CopyColumnToHost(i, offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_int[type_index[colIndex]].data());
}
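// Marks group boundaries by comparing adjacent rows of the group-by columns (which are assumed to be
// already sorted by those columns): grp[i] is set when row i differs from row i+1 in any column, the
// last row is always marked, and grp_count ends up holding the number of distinct groups.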
void CudaSet::GroupBy(stack<string> columnRef, unsigned int int_col_count)
{
int grpInd, colIndex;
if(grp)
hipFree(grp);
CUDA_SAFE_CALL(hipMalloc((void **) &grp, mRecCount * sizeof(bool)));
thrust::device_ptr<bool> d_grp(grp);
thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);
thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
d_group[mRecCount-1] = 1;
unsigned int i_count = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
columnGroups.push(columnRef.top()); // save for future references
colIndex = columnNames[columnRef.top()];
if(!onDevice(colIndex)) {
allocColumnOnDevice(colIndex,mRecCount);
CopyColumnToGpu(colIndex, mRecCount);
grpInd = 1;
}
else
grpInd = 0;
if (type[colIndex] == 0) { // int_type
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
}
else if (type[colIndex] == 1) { // float_type
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
}
else { // Char
//str_grp(d_columns_char[type_index[colIndex]], mRecCount, d_group, char_size[type_index[colIndex]]);
//use int_type
thrust::transform(d_columns_int[int_col_count+i_count].begin(), d_columns_int[int_col_count+i_count].begin() + mRecCount - 1,
d_columns_int[int_col_count+i_count].begin()+1, d_group, thrust::not_equal_to<int_type>());
i_count++;
};
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
if (grpInd == 1)
deAllocColumnOnDevice(colIndex);
};
thrust::device_free(d_group);
grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
};
void CudaSet::addDeviceColumn(int_type* col, int colIndex, string colName, unsigned int recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type_index[colIndex] = d_columns_int.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_int[type_index[colIndex]].size() < recCount) {
d_columns_int[type_index[colIndex]].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[type_index[colIndex]].begin());
};
void CudaSet::addDeviceColumn(float_type* col, int colIndex, string colName, unsigned int recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
type_index[colIndex] = d_columns_float.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_float[type_index[colIndex]].size() < recCount)
d_columns_float[type_index[colIndex]].resize(recCount);
};
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[type_index[colIndex]].begin());
};
void CudaSet::writeHeader(char* file_name, unsigned int col) {
char str[100];
char col_pos[3];
strcpy(str, file_name);
strcat(str,".");
itoaa(col,col_pos);
strcat(str,col_pos);
string ff = str;
strcat(str,".header");
fstream binary_file(str,ios::out|ios::binary|ios::app);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&total_segments, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
binary_file.close();
};
void CudaSet::Store(char* file_name, char* sep, unsigned int limit, bool binary )
{
if (mRecCount == 0 && binary == 1) { // write tails
for(unsigned int i = 0; i< mColumnCount; i++) {
writeHeader(file_name, cols[i]);
};
return;
};
unsigned int mCount;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else
mCount = mRecCount;
if(binary == 0) {
char buffer [33];
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
curr_segment = 1000000;
FILE *file_pr = fopen(file_name, "w");
if (file_pr == NULL)
cout << "Could not open file " << file_name << endl;
if(prm.size() || source)
allocColumns(this, op_vx);
unsigned int curr_seg = 0, cnt = 0;
unsigned curr_count, sum_printed = 0;
while(sum_printed < mCount) {
// cout << "mcount " << mCount << " " << prm.size() << " " << keep << endl;
if(prm.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
// if host arrays are empty
unsigned int olRecs = mRecCount;
resize(mRecCount);
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount)
curr_count = mRecCount;
else {
curr_count = mCount - sum_printed;
};
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
string ss;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
if (i != mCount -1)
fputs("\n",file_pr);
};
curr_seg++;
};
fclose(file_pr);
}
else if(text_source) { //writing a binary file using a text file as a source
char str[100];
char col_pos[3];
total_count = total_count + mCount;
total_segments = total_segments + 1;
if (mCount > total_max)
total_max = mCount;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
curr_file = str;
strcat(str,".");
itoaa(total_segments-1,col_pos);
strcat(str,col_pos);
cout << "Writing to " << str << endl;
if(type[i] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mCount, d_col);
cout << "Compressing " << endl;
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
cout << "Compressed " << endl;
}
else if(type[i] == 1) {
if(decimal[i]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
cout << "Compressing " << endl;
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
cout << "Compressed " << endl;
}
else { // do not compress -- float
fstream binary_file(str,ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data()),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else { //char
cout << "Compressing char" << endl;
compress_char(str, i, mCount);
cout << "Compressed char " << endl;
};
if(fact_file_loaded) {
writeHeader(file_name, cols[i]);
};
};
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2)
deAllocColumnOnDevice(i);
hipFree(d);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
// do it for every segment
// will add this later
};
}
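// On-disk layout of a compressed char segment, as written below and read back by decompress_char()
// and decompress_char_hash() (len = the column's char_size):
// [4B dictionary size][dictionary entries, len bytes each]
// [4B fit_count][4B bits_encoded][4B vals_count][4B real_count]
// [vals_count x 8B words holding the bit-packed per-row dictionary indices]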
void CudaSet::compress_char(string file_name, unsigned int index, unsigned int mCount)
{
std::vector<string> v1;
std::map<string,unsigned int> dict;
std::vector<string> dict_ordered;
std::vector<unsigned int> dict_val;
map<string,unsigned int>::iterator iter;
unsigned int bits_encoded;
char* field;
unsigned int len = char_size[type_index[index]];
field = new char[len+1];
field[len] = '\0'; // keep the buffer terminated even when a value fills the full column width
for (unsigned int i = 0 ; i < mCount; i++) {
strncpy(field, h_columns_char[type_index[index]] + i*len, len);
v1.push_back(field);
if((iter = dict.find(field)) != dict.end()) {
dict_val.push_back(iter->second);
}
else {
string f = field;
unsigned int id = (unsigned int)dict.size(); // take the id before inserting, avoiding unspecified evaluation order
dict[f] = id;
dict_val.push_back(id);
dict_ordered.push_back(f);
};
};
bits_encoded = (unsigned int)ceil(log2(double(dict.size()+1)));
char *cc = new char[len+1];
unsigned int sz = dict_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary);
binary_file.write((char *)&sz, 4);
for(unsigned int i = 0; i < dict_ordered.size(); i++) {
memset(&cc[0], 0, len);
strcpy(cc,dict_ordered[i].c_str());
binary_file.write(cc, len);
};
delete [] cc;
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, 8);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
delete [] field;
binary_file.close();
};
void CudaSet::LoadFile(char* file_name, char* sep )
{
unsigned int count = 0;
char line[500];
char* field;
unsigned int current_column = 1;
FILE *file_ptr = fopen(file_name, "r");
if (file_ptr == NULL)
cout << "Could not open file " << file_name << endl;
unsigned int *seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
while (fgets(line, 500, file_ptr) != NULL ) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
}
else { // handling possible dates
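// e.g. "1993-07-15" is rewritten in place to "19930715" and stored as the integer 19930715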
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {
strcpy(h_columns_char[type_index[seq[i]]] + count*char_size[type_index[seq[i]]], field);
};
};
count++;
if (count == mRecCount) {
mRecCount = mRecCount + process_count;
resize(mRecCount);
};
};
fclose(file_ptr);
mRecCount = count;
};
int CudaSet::LoadBigFile(const char* file_name, const char* sep )
{
unsigned int count = 0;
char line[1000];
char* field;
unsigned int current_column = 1;
if (file_p == NULL)
file_p = fopen(file_name, "r");
if (file_p == NULL)
cout << "Could not open file " << file_name << endl;
if (seq == 0) {
seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
};
while (count < process_count && fgets(line, 1000, file_p) != NULL) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {//char
strcpy(h_columns_char[type_index[seq[i]]] + count*char_size[type_index[seq[i]]], field);
}
};
count++;
};
mRecCount = count;
if(count < process_count) {
fclose(file_p);
return 1;
}
else
return 0;
};
void CudaSet::free() {
if (seq)
delete [] seq;
for(unsigned int i = 0; i < mColumnCount; i++ ) {
if(type[i] == 2 && h_columns_char[type_index[i]] && prm.empty()) {
delete [] h_columns_char[type_index[i]];
h_columns_char[type_index[i]] = NULL;
};
};
if(!prm.empty()) { // free the sources
string some_field;
map<string,int>::iterator it=columnNames.begin();
some_field = (*it).first;
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
delete [] type;
delete [] cols;
if(!columnGroups.empty() && mRecCount !=0 && grp != NULL)
hipFree(grp);
for(unsigned int i = 0; i < prm.size(); i++)
delete [] prm[i];
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s) res = 1;
else res = 0;
else if (op_type == 1) // <
if(d<s) res = 1;
else res = 0;
else if (op_type == 6) // >=
if(d>=s) res = 1;
else res = 0;
else if (op_type == 5) // <=
if(d<=s) res = 1;
else res = 0;
else if (op_type == 4)// =
if(d==s) res = 1;
else res = 0;
else // !=
if(d!=s) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
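// fill the whole result column with the constant scalar comparison outcome (sequence with a step of 0 acts as a fill)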
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON) res = 1;
else res = 0;
else if (op_type == 1) // <
if ((s-d) > EPSILON) res = 1;
else res = 0;
else if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON)) res = 1;
else res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::sequence(p, p+mRecCount,res,(bool)0);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // in-place transformation
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
}
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
};
return thrust::raw_pointer_cast(temp);
}
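// --- Illustrative sketch, not part of the original CudaSet interface ---
// The overload above materializes the scalar `d` into a temporary device vector
// with thrust::fill before transforming. The float overload further below avoids
// that copy by streaming the scalar through thrust::make_constant_iterator (already
// used in this file). A minimal, hedged version of that alternative for integer
// columns might look like this (the helper name is made up, "ADD" case only):
static int_type* op_scalar_constant_iter_sketch(int_type* column, int_type d, unsigned int n)
{
    thrust::device_ptr<int_type> src(column);                        // wrap raw device pointer
    thrust::device_ptr<int_type> out = thrust::device_malloc<int_type>(n);
    thrust::transform(src, src + n,
                      thrust::make_constant_iterator(d),             // scalar input, no temp vector
                      out, thrust::plus<int_type>());
    return thrust::raw_pointer_cast(out);                            // caller frees, as above
}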
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
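// Usage note (assumed values, for illustration only): the `reverse` flag in the
// op() overloads above only matters for the non-commutative operators. With a
// column value c = 10 and scalar d = 4:
//   reverse == 0 : "MINUS" -> c - d =  6     divide fallback -> c / d = 2.5
//   reverse == 1 : "MINUS" -> d - c = -6     divide fallback -> d / c = 0.4
// "MUL" and "ADD" produce the same result either way.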
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name) // compressed data for DIM tables
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
unsigned int cnt;
file_p = NULL;
FILE* f;
char f1[100];
not_compressed = 0;
mRecCount = Recs;
load_file_name = file_name;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
strcpy(f1, file_name);
strcat(f1,".");
char col_pos[3];
itoaa(colsRef.front(),col_pos);
strcat(f1,col_pos); // read the size of a segment
strcat(f1, ".header");
f = fopen (f1 , "rb" );
for(unsigned int j = 0; j < 5; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
//cout << "creating " << f1 << " " << cnt << endl;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>(cnt + 9));
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type >());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
file_p = NULL;
mRecCount = Recs;
segCount = 1;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(unsigned int RecordCount, unsigned int ColumnCount)
{
mRecCount = RecordCount;
mColumnCount = ColumnCount;
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
seq = 0;
for(unsigned int i =0; i < mColumnCount; i++) {
cols[i] = i;
};
};
void CudaSet::initialize(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = Recs;
mColumnCount = op_sel.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
maxRecs = b->maxRecs;
map<string,int>::iterator it;
seq = 0;
unsigned int i = 0;
segCount = 1;
not_compressed = 1;
col_aliases = op_sel_as;
queue<string> names(op_sel);
while(!names.empty()) {
columnNames[names.front()] = i;
names.pop();
i++;
};
unsigned int index;
for(unsigned int i=0; i < mColumnCount; i++) {
if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
index = it->second;
cols[i] = i;
decimal[i] = a->decimal[index];
if ((a->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(a->char_size[a->type_index[index]]);
prealloc_char_size = 0;
};
}
else {
it = b->columnNames.find(op_sel.front());
index = it->second;
cols[i] = i;
decimal[i] = b->decimal[index];
if ((b->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((b->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(b->char_size[b->type_index[index]]);
prealloc_char_size = 0;
};
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 5;
else if (op_type == 1) // <
return 6;
else if (op_type == 6) // >=
return 1;
else if (op_type == 5) // <=
return 2;
else return op_type;
}
size_t getFreeMem()
{
size_t available, total;
hipMemGetInfo(&available, &total);
return available;
} ;
void allocColumns(CudaSet* a, queue<string> fields)
{
if(!a->prm.empty()) {
unsigned int max_sz = max_tmp(a) ;
CudaSet* t = varNames[setMap[fields.front()]];
if(max_sz*t->maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, max_sz*t->maxRecs);
alloced_sz = max_sz*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
unsigned int idx = a->columnNames[fields.front()];
bool onDevice = 0;
if(a->type[idx] == 0) {
if(a->d_columns_int[a->type_index[idx]].size() > 0) {
onDevice = 1;
}
}
else if(a->type[idx] == 1) {
if(a->d_columns_float[a->type_index[idx]].size() > 0) {
onDevice = 1;
};
}
else {
if((a->d_columns_char[a->type_index[idx]]) != NULL) {
onDevice = 1;
};
};
if (!onDevice) {
if(a->prm.empty()) {
a->allocColumnOnDevice(idx, a->maxRecs);
}
else {
a->allocColumnOnDevice(idx, largest_prm(a));
};
};
};
fields.pop();
};
};
}
unsigned int largest_prm(CudaSet* a)
{
unsigned int maxx = 0;
for(unsigned int i = 0; i < a->prm_count.size(); i++)
if(maxx < a->prm_count[i])
maxx = a->prm_count[i];
if(maxx == 0)
maxx = a->maxRecs;
return maxx;
};
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, unsigned int& count)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//find the largest possible size of a gathered segment
if(!a->onDevice(idx)) {
unsigned int max_count = 0;
for(unsigned int i = 0; i < a->prm.size(); i++)
if (a->prm_count[i] > max_count)
max_count = a->prm_count[i];
a->allocColumnOnDevice(idx, max_count);
};
unsigned int g_size = a->prm_count[segment];
if(a->prm_index[segment] == 'R') {
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a));
if(curr_segment != segment) {
hipMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[segment],
4*g_size, hipMemcpyHostToDevice);
curr_segment = segment;
};
mygather(tindex, idx, a, t, count, g_size);
}
else {
mycopy(tindex, idx, a, t, count, g_size);
};
a->mRecCount = g_size;
}
unsigned int getSegmentRecCount(CudaSet* a, unsigned int segment) {
if (segment == a->segCount-1) {
return oldCount - a->maxRecs*segment;
}
else
return a->maxRecs;
}
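// Worked example (assumed numbers): with oldCount = 2500000 records split into
// segCount = 3 segments of maxRecs = 1000000, segments 0 and 1 report 1000000
// records each, and the last segment reports 2500000 - 1000000*2 = 500000.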
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count)
{
set<string> uniques;
CudaSet *t;
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
if(!a->prm.empty()) {
t = varNames[setMap[fields.front()]];
if(a->prm_count[segment]) {
alloced_switch = 1;
//cout << "copy " << fields.front() << " " << alloced_switch << endl;
t->CopyColumnToGpu(t->columnNames[fields.front()], segment); // segment i
//cout << "gather " << fields.front() << endl;
gatherColumns(a, t, fields.front(), segment, count);
//cout << "end " << endl;
alloced_switch = 0;
}
else
a->mRecCount = 0;
}
else {
a->CopyColumnToGpu(a->columnNames[fields.front()], segment); // segment i
};
uniques.insert(fields.front());
};
fields.pop();
};
}
void setPrm(CudaSet* a, CudaSet* b, char val, unsigned int segment) {
b->prm.push_back(NULL);
b->prm_index.push_back(val);
if (val == 'A') {
b->mRecCount = b->mRecCount + getSegmentRecCount(a,segment);
b->prm_count.push_back(getSegmentRecCount(a, segment));
}
else {
b->prm_count.push_back(0);
};
}
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
(void*)t->d_columns_char[t->type_index[tindex]], (void*)a->d_columns_char[a->type_index[idx]], a->char_size[a->type_index[idx]] );
}
else {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
alloced_tmp, (void*)a->d_columns_char[a->type_index[idx]], a->char_size[a->type_index[idx]] );
};
}
};
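// --- Illustrative sketch of the gather step above (names are made up) ---
// prm_d holds row indices selected by a previous filter; thrust::gather pulls the
// corresponding values out of the source column into a dense destination, e.g.
// src = {10,20,30,40,50}, prm = {4,0,2}  ->  dst = {50,10,30}.
static void gather_sketch(const thrust::device_vector<unsigned int>& prm,
                          const thrust::device_vector<int_type>& src,
                          thrust::device_vector<int_type>& dst)
{
    dst.resize(prm.size());
    thrust::gather(prm.begin(), prm.end(),   // map: positions to read from src
                   src.begin(),              // source column
                   dst.begin());             // densely packed result
}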
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].begin() + g_size,
a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].begin() + g_size,
a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
hipMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (void**)t->d_columns_char[t->type_index[tindex]],
g_size*t->char_size[t->type_index[tindex]], hipMemcpyDeviceToDevice);
}
else {
hipMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), alloced_tmp,
g_size*t->char_size[t->type_index[tindex]], hipMemcpyDeviceToDevice);
};
};
};
unsigned int load_queue(queue<string> c1, CudaSet* right, bool str_join, string f2, unsigned int &rcount)
{
queue<string> cc;
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() || str_join) {
cc.push(c1.front());
};
};
c1.pop();
};
if(!str_join) {
cc.push(f2);
};
unsigned int cnt_r = 0;
if(!right->prm.empty()) {
allocColumns(right, cc);
rcount = std::accumulate(right->prm_count.begin(), right->prm_count.end(), 0 );
}
else
rcount = right->mRecCount;
queue<string> ct(cc);
reset_offsets();
while(!ct.empty()) {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
ct.pop();
};
ct = cc;
if(right->prm.empty()) {
//copy all records
while(!ct.empty()) {
right->CopyColumnToGpu(right->columnNames[ct.front()]);
ct.pop();
};
cnt_r = right->mRecCount;
}
else {
//copy and gather all records
for(unsigned int i = 0; i < right->segCount; i++) {
copyColumns(right, cc, i, cnt_r);
cnt_r = cnt_r + right->prm_count[i];
};
};
return cnt_r;
}
unsigned int max_char(CudaSet* a)
{
unsigned int max_char = 0;
for(unsigned int i = 0; i < a->char_size.size(); i++)
if (a->char_size[i] > max_char)
max_char = a->char_size[i];
return max_char;
};
unsigned int max_char(CudaSet* a, set<string> field_names)
{
unsigned int max_char = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
int i = a->columnNames[*it];
if (a->type[i] == 2) {
if (a->char_size[a->type_index[i]] > max_char)
max_char = a->char_size[a->type_index[i]];
};
};
return max_char;
};
unsigned int max_tmp(CudaSet* a)
{
unsigned int max_sz = 0;
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[i] == 0) {
if(int_size > max_sz)
max_sz = int_size;
}
else if(a->type[i] == 1) {
if(float_size > max_sz)
max_sz = float_size;
};
};
unsigned int m_char = max_char(a);
if(m_char > max_sz)
return m_char;
else
return max_sz;
};
void reset_offsets() {
map<unsigned int, unsigned int>::iterator iter;
for (iter = str_offset.begin(); iter != str_offset.end(); ++iter) {
iter->second = 0;
};
};
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
unsigned int tot_sz = 0, idx;
while(!cols.empty()) {
idx = a->columnNames[cols.front()];
if(a->type[idx] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[a->type_index[idx]];
cols.pop();
};
cout << "tot " << tot_sz << endl;
if(a->mRecCount*tot_sz > mem_available/2) {
a->segCount = (a->mRecCount*tot_sz)/(mem_available/2) + 1;
};
};
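// Worked example (assumed numbers): with four int columns, tot_sz = 4*int_size =
// 32 bytes per record; at 100,000,000 records and 4 GB free on the device the
// data needs 3.2 GB, which exceeds half of the free memory, so
// segCount = 3,200,000,000 / 2,147,483,648 + 1 = 2 segments.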
|
54500e52c55923fa0f35663988520e0926bc6a5c.cu
|
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <functional>
#include <numeric>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#endif
using namespace std;
using namespace thrust::placeholders;
unsigned long long int total_count = 0;
unsigned int total_segments = 0;
unsigned int total_max;
unsigned int process_count;
map <unsigned int, unsigned int> str_offset;
long long int totalRecs = 0;
bool fact_file_loaded = 0;
char map_check;
void* d_v = NULL;
void* s_v = NULL;
unsigned int oldCount;
queue<string> op_type;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<string> col_aliases;
void* alloced_tmp;
unsigned int alloced_sz = 0;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
struct is_match
{
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return !(((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
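// --- Illustrative sketch (helper name is made up) ---
// The functors above compare floats through an EPSILON band instead of operator==,
// so values that differ only by rounding noise still compare equal. A minimal
// elementwise use with thrust::transform:
static void float_equal_sketch(const thrust::device_vector<float_type>& a,
                               const thrust::device_vector<float_type>& b,
                               thrust::device_vector<bool>& out)
{
    out.resize(a.size());
    thrust::transform(a.begin(), a.end(), b.begin(), out.begin(), f_equal_to());
}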
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
__host__ __device__
float_type operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct float_to_decimal
{
__host__ __device__
float_type operator()(const float_type x)
{
return (int_type)(x*100);
}
};
struct to_zero
{
__host__ __device__
bool operator()(const int_type x)
{
if(x == -1)
return 0;
else
return 1;
}
};
struct div_long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x, const float_type y)
{
return (float_type)x/y;
}
};
struct long_to_float
{
__host__ __device__
float_type operator()(const long long int x)
{
return (((float_type)x)/100.0);
}
};
// trim from start
static inline std::string &ltrim(std::string &s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
return s;
}
// trim from end
static inline std::string &rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
return s;
}
// trim from both ends
static inline std::string &trim(std::string &s) {
return ltrim(rtrim(s));
}
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int count, unsigned int g_size);
void write_compressed_char(string file_name, unsigned int index, unsigned int mCount);
unsigned int largest_prm(CudaSet* a);
unsigned int max_tmp(CudaSet* a);
unsigned int curr_segment = 10000000;
size_t getFreeMem();
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, unsigned int segment);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
keep = false;
partial_load = 0;
source = 1;
text_source = 1;
grp = NULL;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
keep = false;
partial_load = 1;
source = 1;
text_source = 0;
grp = NULL;
};
CudaSet::CudaSet(unsigned int RecordCount, unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b,Recs, op_sel, op_sel_as);
keep = false;
partial_load = 0;
source = 0;
text_source = 0;
grp = NULL;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(unsigned int colIndex, unsigned int RecordCount)
{
if (type[colIndex] == 0) {
d_columns_int[type_index[colIndex]].resize(RecordCount);
}
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(RecordCount);
else {
void* d;
cudaMalloc(&d, char_size[type_index[colIndex]]*RecordCount);
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
void CudaSet::decompress_char_hash(unsigned int colIndex, unsigned int segment, unsigned int i_cnt)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count, old_count;
const unsigned int len = char_size[type_index[colIndex]];
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
strcat(f1,".");
itoaa(segment,col_pos);
strcat(f1,col_pos);
FILE* f;
f = fopen (f1 , "rb" );
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
unsigned long long int* hashes = new unsigned long long int[sz];
for(unsigned int i = 0; i < sz ; i++) {
hashes[i] = MurmurHash64A(&d_array[i*len], len, hash_seed); // hash each fixed-width dictionary entry
};
void* d;
cudaMalloc((void **) &d, sz*int_size);
cudaMemcpy( d, (void *) hashes, sz*8, cudaMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> dd_int((unsigned long long int*)d);
delete[] d_array;
delete[] hashes;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
cudaMalloc((void **) &d_val, vals_count*8);
cudaMemcpy(d_val, (void *) int_array, vals_count*8, cudaMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> mval((unsigned long long int*)d_val);
delete[] int_array;
void* d_int;
cudaMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v;
cudaMalloc((void **) &d_v, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<long long int> dd_int((long long int*)d);
thrust::device_ptr<unsigned int> dd_val((unsigned int*)d_int);
if(!prm.empty()) {
if(prm_index[segment] == 'R') {
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
cudaMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], cudaMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
};
cudaFree(d);
cudaFree(d_val);
cudaFree(d_v);
cudaFree(d_int);
};
// takes a char column , hashes strings, copies them to a gpu
void CudaSet::add_hashed_strings(string field, unsigned int segment, unsigned int i_cnt)
{
unsigned int colInd2 = columnNames.find(field)->second;
CudaSet *t = varNames[setMap[field]];
if(not_compressed) { // decompressed strings on a host
unsigned int old_count;
unsigned long long int* hashes = new unsigned long long int[t->mRecCount];
for(unsigned int i = 0; i < t->mRecCount ; i++)
hashes[i] = MurmurHash64A(t->h_columns_char[t->type_index[colInd2]] + i*t->char_size[t->type_index[colInd2]], t->char_size[t->type_index[colInd2]], hash_seed);
if(!prm.empty()) {
if(prm_index[segment] == 'R') {
thrust::device_ptr<unsigned long long int> d_tmp = thrust::device_malloc<unsigned long long int>(t->mRecCount);
thrust::copy(hashes, hashes+mRecCount, d_tmp);
if(prm_d.size() == 0) // find the largest prm segment
prm_d.resize(largest_prm(this));
cudaMemcpy((void**)(thrust::raw_pointer_cast(prm_d.data())), (void**)prm[segment],
4*prm_count[segment], cudaMemcpyHostToDevice);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + prm_count[segment]);
thrust::gather(prm_d.begin(), prm_d.begin() + prm_count[segment], d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index[segment] == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else { // hash the dictionary
decompress_char_hash(colInd2, segment, i_cnt);
};
};
void CudaSet::resize(unsigned int addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
if (mRecCount > prealloc_char_size) {
prealloc_char_size = mRecCount;
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], (unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]);
};
}
else {
h_columns_char[type_index[i]] = new char[(unsigned long long int)mRecCount*(unsigned long long int)char_size[type_index[i]]];
};
};
};
};
void CudaSet::reserve(unsigned int Recs)
{
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0)
h_columns_int[type_index[i]].reserve(Recs);
else if(type[i] == 1)
h_columns_float[type_index[i]].reserve(Recs);
else {
unsigned long long int sz = (unsigned long long int)Recs*(unsigned long long int)char_size[type_index[i]];
h_columns_char[type_index[i]] = new char[(unsigned long long int)Recs*(unsigned long long int)char_size[type_index[i]]];
if(h_columns_char[type_index[i]] == NULL) {
cout << "Could not allocate on a host " << Recs << " records of size " << char_size[type_index[i]] << endl;
exit(0);
};
prealloc_char_size = Recs;
};
};
};
void CudaSet::deAllocColumnOnDevice(unsigned int colIndex)
{
if (type[colIndex] == 0 && !d_columns_int.empty()) {
d_columns_int[type_index[colIndex]].resize(0);
d_columns_int[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 1 && !d_columns_float.empty()) {
d_columns_float[type_index[colIndex]].resize(0);
d_columns_float[type_index[colIndex]].shrink_to_fit();
}
else if (type[colIndex] == 2 && d_columns_char[type_index[colIndex]] != NULL) {
cudaFree(d_columns_char[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = NULL;
};
};
void CudaSet::allocOnDevice(unsigned int RecordCount)
{
for(unsigned int i=0; i < mColumnCount; i++)
allocColumnOnDevice(i, RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i <mColumnCount; i++)
deAllocColumnOnDevice(i);
if(!columnGroups.empty() && mRecCount !=0) {
cudaFree(grp);
grp = NULL;
};
if(!prm.empty()) { // free the sources
string some_field;
map<string,int>::iterator it=columnNames.begin();
some_field = (*it).first;
if(setMap[some_field].compare(name)) {
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(unsigned int RecCount, unsigned int colIndex)
{
if (RecCount) {
if (type[colIndex] == 0)
d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
else {
if (d_columns_char[type_index[colIndex]] != NULL)
cudaFree(d_columns_char[type_index[colIndex]]);
void *d;
cudaMalloc((void **) &d, (mRecCount+RecCount)*char_size[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
};
void CudaSet::resizeDevice(unsigned int RecCount)
{
if (RecCount)
for(unsigned int i=0; i < mColumnCount; i++)
resizeDeviceColumn(RecCount, i);
};
bool CudaSet::onDevice(unsigned int i)
{
unsigned j = type_index[i];
if (type[i] == 0) {
if (d_columns_int.empty())
return 0;
if (d_columns_int[j].size() == 0)
return 0;
}
else if (type[i] == 1) {
if (d_columns_float.empty())
return 0;
if(d_columns_float[j].size() == 0)
return 0;
}
else if (type[i] == 2) {
if(d_columns_char.empty())
return 0;
if(d_columns_char[j] == NULL)
return 0;
};
return 1;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if(a->type[i] == 0) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
a->type_index[i] = a->d_columns_int.size()-1;
}
else if(a->type[i] == 1) {
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
a->type_index[i] = a->d_columns_float.size()-1;
a->decimal[i] = decimal[i];
}
else {
a->h_columns_char.push_back(NULL);
a->d_columns_char.push_back(NULL);
a->type_index[i] = a->d_columns_char.size()-1;
};
};
a->char_size = char_size;
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
unsigned long long int CudaSet::readSegmentsFromFile(unsigned int segNum, unsigned int colIndex)
{
char f1[100];
strcpy(f1, load_file_name);
strcat(f1,".");
char col_pos[3];
itoaa(cols[colIndex],col_pos);
strcat(f1,col_pos);
unsigned int cnt;
strcat(f1,".");
itoaa(segNum,col_pos);
strcat(f1,col_pos);
FILE* f;
f = fopen (f1 , "rb" );
if(type[colIndex] == 0) {
fread(h_columns_int[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[0];
fread((unsigned int*)(h_columns_int[type_index[colIndex]].data()) + 1, (cnt+8)*8 - 4, 1, f);
}
else if(type[colIndex] == 1) {
fread(h_columns_float[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_float[type_index[colIndex]].data()))[0];
fread((unsigned int*)(h_columns_float[type_index[colIndex]].data()) + 1, (cnt+8)*8 - 4, 1, f);
}
else {
decompress_char(f, colIndex, segNum);
};
fclose(f);
return 0;
};
void CudaSet::decompress_char(FILE* f, unsigned int colIndex, unsigned int segNum)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
const unsigned int len = char_size[type_index[colIndex]];
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
void* d;
cudaMalloc((void **) &d, sz*len);
cudaMemcpy( d, (void *) d_array, sz*len, cudaMemcpyHostToDevice);
delete[] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
cudaMalloc((void **) &d_val, vals_count*8);
cudaMemcpy(d_val, (void *) int_array, vals_count*8, cudaMemcpyHostToDevice);
delete[] int_array;
void* d_int;
cudaMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v;
cudaMalloc((void **) &d_v, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v);
thrust::for_each(begin, begin + real_count, ff);
//thrust::device_ptr<unsigned int> dd_r((unsigned int*)d_int);
//for(int z = 0 ; z < 3; z++)
//cout << "DD " << dd_r[z] << endl;
//void* d_char;
//cudaMalloc((void **) &d_char, real_count*len);
//cudaMemset(d_char, 0, real_count*len);
//str_gather(d_int, real_count, d, d_char, len);
if(str_offset.count(colIndex) == 0)
str_offset[colIndex] = 0;
//cout << "str off " << str_offset[colIndex] << endl;
if(!alloced_switch)
str_gather(d_int, real_count, d, d_columns_char[type_index[colIndex]] + str_offset[colIndex]*len, len);
else
str_gather(d_int, real_count, d, alloced_tmp, len);
if(!prm.empty()) {
str_offset[colIndex] = str_offset[colIndex] + prm_count[segNum];
}
else {
str_offset[colIndex] = str_offset[colIndex] + real_count;
};
//if(d_columns_char[type_index[colIndex]])
// cudaFree(d_columns_char[type_index[colIndex]]);
//d_columns_char[type_index[colIndex]] = (char*)d_char;
mRecCount = real_count;
cudaFree(d);
cudaFree(d_val);
cudaFree(d_v);
cudaFree(d_int);
}
void CudaSet::CopyToGpu(unsigned int offset, unsigned int count)
{
if (not_compressed) {
for(unsigned int i = 0; i < mColumnCount; i++) {
switch(type[i]) {
case 0 :
thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + count, d_columns_int[type_index[i]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + count, d_columns_float[type_index[i]].begin());
break;
default :
cudaMemcpy(d_columns_char[type_index[i]], h_columns_char[type_index[i]], char_size[type_index[i]]*(offset + count), cudaMemcpyHostToDevice);
};
};
}
else
for(unsigned int i = 0; i < mColumnCount; i++)
CopyColumnToGpu(i, offset, count);
};
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int segment)
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
if(!alloced_switch)
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_col);
};
break;
case 1 :
if(!alloced_switch)
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_col);
};
break;
default :
if(!alloced_switch)
cudaMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
else
cudaMemcpy(alloced_tmp, h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
};
}
else {
unsigned long long int data_offset;
if (partial_load)
data_offset = readSegmentsFromFile(segment,colIndex);
if(type[colIndex] != 2) {
if(d_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
};
if(type[colIndex] == 0) {
if(!alloced_switch) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data()), h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
}
else {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
};
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
if(!alloced_switch) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()) , h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data()));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
}
else {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
}
//else // uncompressed float
//cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
cudaMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
};
}
else {
long long int data_offset;
unsigned long long int totalRecs = 0;
if(d_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
str_offset[colIndex] = 0;
for(unsigned int i = 0; i < segCount; i++) {
if (partial_load)
data_offset = readSegmentsFromFile(i,colIndex);
if(type[colIndex] == 0) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totalRecs), h_columns_int[type_index[colIndex]].data() + data_offset, d_v, s_v);
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs) , h_columns_float[type_index[colIndex]].data() + data_offset, d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totalRecs));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totalRecs, long_to_float());
}
// else uncompressed float
//cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
};
totalRecs = totalRecs + mRecCount;
};
mRecCount = totalRecs;
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int offset, unsigned int count)
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + offset + count, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + offset + count, d_columns_float[type_index[colIndex]].begin());
break;
default :
cudaMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*(offset + count), cudaMemcpyHostToDevice);
};
}
else {
};
}
void CudaSet::CopyColumnToHost(int colIndex, unsigned int offset, unsigned int RecCount)
{
switch(type[colIndex]) {
case 0 :
thrust::copy(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + RecCount, h_columns_int[type_index[colIndex]].begin() + offset);
break;
case 1 :
thrust::copy(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + RecCount, h_columns_float[type_index[colIndex]].begin() + offset);
break;
default :
cudaMemcpy(h_columns_char[type_index[colIndex]] + offset*char_size[type_index[colIndex]], d_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*RecCount, cudaMemcpyDeviceToHost);
}
}
void CudaSet::CopyColumnToHost(int colIndex)
{
CopyColumnToHost(colIndex, 0, mRecCount);
}
void CudaSet::CopyToHost(unsigned int offset, unsigned int count)
{
for(unsigned int i = 0; i < mColumnCount; i++) {
CopyColumnToHost(i, offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_int[type_index[colIndex]].data());
}
void CudaSet::GroupBy(stack<string> columnRef, unsigned int int_col_count)
{
int grpInd, colIndex;
if(grp)
cudaFree(grp);
CUDA_SAFE_CALL(cudaMalloc((void **) &grp, mRecCount * sizeof(bool)));
thrust::device_ptr<bool> d_grp(grp);
thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);
thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
d_group[mRecCount-1] = 1;
unsigned int i_count = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
columnGroups.push(columnRef.top()); // save for future references
colIndex = columnNames[columnRef.top()];
if(!onDevice(colIndex)) {
allocColumnOnDevice(colIndex,mRecCount);
CopyColumnToGpu(colIndex, mRecCount);
grpInd = 1;
}
else
grpInd = 0;
if (type[colIndex] == 0) { // int_type
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
}
else if (type[colIndex] == 1) { // float_type
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
}
else { // Char
//str_grp(d_columns_char[type_index[colIndex]], mRecCount, d_group, char_size[type_index[colIndex]]);
//use int_type
thrust::transform(d_columns_int[int_col_count+i_count].begin(), d_columns_int[int_col_count+i_count].begin() + mRecCount - 1,
d_columns_int[int_col_count+i_count].begin()+1, d_group, thrust::not_equal_to<int_type>());
i_count++;
};
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
if (grpInd == 1)
deAllocColumnOnDevice(colIndex);
};
thrust::device_free(d_group);
grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
};
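// --- Illustrative sketch of the boundary-marking idea used in GroupBy above ---
// On a sorted key column, comparing each element with its successor flags the
// positions where a new group starts; counting the flags (plus the forced
// last-row flag) gives the group count. Assumed example:
// keys = {3,3,3,7,7,9} -> adjacent not_equal_to = {0,0,1,0,1} -> 2 boundaries,
// plus the final row -> 3 groups. The helper name is made up.
static unsigned int count_groups_sketch(const thrust::device_vector<int_type>& sorted_keys)
{
    if (sorted_keys.size() < 2)
        return (unsigned int)sorted_keys.size();
    thrust::device_vector<bool> flags(sorted_keys.size() - 1);
    thrust::transform(sorted_keys.begin(), sorted_keys.end() - 1,
                      sorted_keys.begin() + 1, flags.begin(),
                      thrust::not_equal_to<int_type>());
    return (unsigned int)(thrust::count(flags.begin(), flags.end(), true) + 1);  // +1 for the last row
}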
void CudaSet::addDeviceColumn(int_type* col, int colIndex, string colName, unsigned int recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type_index[colIndex] = d_columns_int.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_int[type_index[colIndex]].size() < recCount) {
d_columns_int[type_index[colIndex]].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[type_index[colIndex]].begin());
};
void CudaSet::addDeviceColumn(float_type* col, int colIndex, string colName, unsigned int recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
type_index[colIndex] = d_columns_float.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_float[type_index[colIndex]].size() < recCount)
d_columns_float[type_index[colIndex]].resize(recCount);
};
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[type_index[colIndex]].begin());
};
void CudaSet::writeHeader(char* file_name, unsigned int col) {
char str[100];
char col_pos[3];
strcpy(str, file_name);
strcat(str,".");
itoaa(col,col_pos);
strcat(str,col_pos);
string ff = str;
strcat(str,".header");
fstream binary_file(str,ios::out|ios::binary|ios::app);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&total_segments, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
binary_file.close();
};
void CudaSet::Store(char* file_name, char* sep, unsigned int limit, bool binary )
{
if (mRecCount == 0 && binary == 1) { // write tails
for(unsigned int i = 0; i< mColumnCount; i++) {
writeHeader(file_name, cols[i]);
};
return;
};
unsigned int mCount;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else
mCount = mRecCount;
if(binary == 0) {
char buffer [33];
queue<string> op_vx;
for ( map<string,int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
curr_segment = 1000000;
FILE *file_pr = fopen(file_name, "w");
if (file_pr == NULL)
cout << "Could not open file " << file_name << endl;
if(prm.size() || source)
allocColumns(this, op_vx);
unsigned int curr_seg = 0, cnt = 0;
unsigned curr_count, sum_printed = 0;
while(sum_printed < mCount) {
// cout << "mcount " << mCount << " " << prm.size() << " " << keep << endl;
if(prm.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
// if host arrays are empty
unsigned int olRecs = mRecCount;
resize(mRecCount);
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount)
curr_count = mRecCount;
else {
curr_count = mCount - sum_printed;
};
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
string ss;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
if (i != mCount -1)
fputs("\n",file_pr);
};
curr_seg++;
};
fclose(file_pr);
}
else if(text_source) { //writing a binary file using a text file as a source
char str[100];
char col_pos[3];
total_count = total_count + mCount;
total_segments = total_segments + 1;
if (mCount > total_max)
total_max = mCount;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
for(unsigned int i = 0; i< mColumnCount; i++) {
strcpy(str, file_name);
strcat(str,".");
itoaa(cols[i],col_pos);
strcat(str,col_pos);
curr_file = str;
strcat(str,".");
itoaa(total_segments-1,col_pos);
strcat(str,col_pos);
cout << "Writing to " << str << endl;
if(type[i] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::copy(h_columns_int[type_index[i]].begin(), h_columns_int[type_index[i]].begin() + mCount, d_col);
cout << "Compressing " << endl;
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0, 0);
cout << "Compressed " << endl;
}
else if(type[i] == 1) {
if(decimal[i]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
thrust::copy(h_columns_float[type_index[i]].begin(), h_columns_float[type_index[i]].begin() + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
cout << "Compressing " << endl;
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1, 0);
cout << "Compressed " << endl;
}
else { // do not compress -- float
fstream binary_file(str,ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data()),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else { //char
cout << "Compressing char" << endl;
compress_char(str, i, mCount);
cout << "Compressed char " << endl;
};
if(fact_file_loaded) {
writeHeader(file_name, cols[i]);
};
};
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2)
deAllocColumnOnDevice(i);
cudaFree(d);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
// do it for every segment
// will add this later
};
}
void CudaSet::compress_char(string file_name, unsigned int index, unsigned int mCount)
{
std::vector<string> v1;
std::map<string,unsigned int> dict;
std::vector<string> dict_ordered;
std::vector<unsigned int> dict_val;
map<string,unsigned int>::iterator iter;
unsigned int bits_encoded;
char* field;
unsigned int len = char_size[type_index[index]];
field = new char[len];
for (unsigned int i = 0 ; i < mCount; i++) {
strncpy(field, h_columns_char[type_index[index]] + i*len, char_size[type_index[index]]);
v1.push_back(field);
if((iter = dict.find(field)) != dict.end()) {
dict_val.push_back(iter->second);
}
else {
string f = field;
dict[f] = dict.size();
dict_val.push_back(dict.size()-1);
dict_ordered.push_back(f);
};
};
bits_encoded = (unsigned int)ceil(log2(double(dict.size()+1)));
char *cc = new char[len+1];
unsigned int sz = dict_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary);
binary_file.write((char *)&sz, 4);
for(unsigned int i = 0; i < dict_ordered.size(); i++) {
memset(&cc[0], 0, len);
strcpy(cc,dict_ordered[i].c_str());
binary_file.write(cc, len);
};
delete [] cc;
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, 8);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
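// --- Host-only sketch of the packing scheme above (not the exact loop) ---
// Each dictionary index occupies bits_encoded bits, so fit_count = 64 / bits_encoded
// indices share one 64-bit word. This simplified variant right-aligns the final
// partial word, whereas the loop above left-aligns it; the idea is the same.
// Assumes bits_encoded >= 1 and every value fits in bits_encoded bits.
static std::vector<unsigned long long int> pack_indices_sketch(
    const std::vector<unsigned int>& vals, unsigned int bits_encoded)
{
    unsigned int fit_count = 64 / bits_encoded;
    std::vector<unsigned long long int> packed;
    for (unsigned int i = 0; i < vals.size(); i += fit_count) {
        unsigned long long int word = 0;
        for (unsigned int j = i; j < i + fit_count && j < vals.size(); j++)
            word = (word << bits_encoded) | vals[j];   // append the next index
        packed.push_back(word);
    };
    return packed;
}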
void CudaSet::LoadFile(char* file_name, char* sep )
{
unsigned int count = 0;
char line[500];
char* field;
unsigned int current_column = 1;
FILE *file_ptr = fopen(file_name, "r");
if (file_ptr == NULL)
cout << "Could not open file " << file_name << endl;
unsigned int *seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
while (fgets(line, 500, file_ptr) != NULL ) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {
strcpy(h_columns_char[type_index[seq[i]]] + count*char_size[type_index[seq[i]]], field);
};
};
count++;
if (count == mRecCount) {
mRecCount = mRecCount + process_count;
resize(mRecCount);
};
};
fclose(file_ptr);
mRecCount = count;
};
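// --- Standalone sketch of the date normalization used above ---
// A field like "1995-01-27" is rewritten in place to "19950127" before atoll() by
// sliding the month and day over the '-' separators. Assumed YYYY-MM-DD input and
// no validation; memmove is used here to avoid the overlapping-copy caveat of
// strncpy. The helper name is made up.
static long long int date_to_int_sketch(char* field)   // "1995-01-27" -> 19950127
{
    memmove(field + 4, field + 5, 2);   // slide MM over the first '-'
    memmove(field + 6, field + 8, 2);   // slide DD over the second '-'
    field[8] = '\0';                    // truncate to 8 digits
    return atoll(field);
}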
int CudaSet::LoadBigFile(const char* file_name, const char* sep )
{
unsigned int count = 0;
char line[1000];
char* field;
unsigned int current_column = 1;
if (file_p == NULL)
file_p = fopen(file_name, "r");
if (file_p == NULL)
cout << "Could not open file " << file_name << endl;
if (seq == 0) {
seq = new unsigned int[mColumnCount];
thrust::sequence(seq, seq+mColumnCount,0,1);
thrust::stable_sort_by_key(cols, cols+mColumnCount, seq);
};
while (count < process_count && fgets(line, 1000, file_p) != NULL) {
current_column = 1;
field = strtok(line,sep);
for(unsigned int i = 0; i< mColumnCount; i++) {
while(cols[i] > current_column) {
field = strtok(NULL,sep);
current_column++;
};
if (type[seq[i]] == 0) {
if (strchr(field,'-') == NULL) {
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
}
else { // handling possible dates
strncpy(field+4,field+5,2);
strncpy(field+6,field+8,2);
field[8] = '\0';
(h_columns_int[type_index[seq[i]]])[count] = atoll(field);
};
}
else if (type[seq[i]] == 1)
(h_columns_float[type_index[seq[i]]])[count] = atoff(field);
else {//char
strcpy(h_columns_char[type_index[seq[i]]] + count*char_size[type_index[seq[i]]], field);
}
};
count++;
};
mRecCount = count;
if(count < process_count) {
fclose(file_p);
return 1;
}
else
return 0;
};
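// Free host-side char buffers and metadata arrays; for filtered sets the device
// columns of the underlying source table are released as well.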
void CudaSet::free() {
    if (seq)
        delete [] seq;
for(unsigned int i = 0; i < mColumnCount; i++ ) {
if(type[i] == 2 && h_columns_char[type_index[i]] && prm.empty()) {
delete [] h_columns_char[type_index[i]];
h_columns_char[type_index[i]] = NULL;
};
};
if(!prm.empty()) { // free the sources
string some_field;
map<string,int>::iterator it=columnNames.begin();
some_field = (*it).first;
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
    delete [] type;
    delete [] cols;
if(!columnGroups.empty() && mRecCount !=0 && grp != NULL)
cudaFree(grp);
for(unsigned int i = 0; i < prm.size(); i++)
delete [] prm[i];
};
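// Element-wise AND / OR of two device boolean filters; the result replaces column1
// and column2 is freed.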
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
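// The compare() overloads evaluate a comparison over mRecCount values and return a device
// boolean array. op_type codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.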
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s) res = 1;
else res = 0;
else if (op_type == 1) // <
if(d<s) res = 1;
else res = 0;
else if (op_type == 6) // >=
if(d>=s) res = 1;
else res = 0;
else if (op_type == 5) // <=
if(d<=s) res = 1;
else res = 0;
else if (op_type == 4)// =
if(d==s) res = 1;
else res = 0;
else // !=
if(d!=s) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON) res = 1;
else res = 0;
else if (op_type == 1) // <
if ((s-d) > EPSILON) res = 1;
else res = 0;
else if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON)) res = 1;
else res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
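// The op() overloads apply an arithmetic operation ("MUL", "ADD", "MINUS", anything else
// divides) between columns and/or constants and return a newly allocated device column;
// 'reverse' swaps the operand order.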
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
    thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the integer column to float into temp
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
}
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
};
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
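// Initialize a set whose columns live in compressed files on disk: each column's
// .header file is read to size the host buffers and the column types are recorded.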
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs, char* file_name) // compressed data for DIM tables
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
unsigned int cnt;
file_p = NULL;
FILE* f;
char f1[100];
not_compressed = 0;
mRecCount = Recs;
load_file_name = file_name;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
strcpy(f1, file_name);
strcat(f1,".");
char col_pos[3];
itoaa(colsRef.front(),col_pos);
strcat(f1,col_pos); // read the size of a segment
strcat(f1, ".header");
f = fopen (f1 , "rb" );
for(unsigned int j = 0; j < 5; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
//cout << "creating " << f1 << " " << cnt << endl;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>(cnt + 9));
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type >());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>(cnt + 9));
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
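// Initialize an empty in-memory set with the given column definitions and record count.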
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, int_type Recs)
{
mColumnCount = nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
file_p = NULL;
mRecCount = Recs;
segCount = 1;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
seq = 0;
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(unsigned int RecordCount, unsigned int ColumnCount)
{
mRecCount = RecordCount;
mColumnCount = ColumnCount;
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
seq = 0;
for(unsigned int i =0; i < mColumnCount; i++) {
cols[i] = i;
};
};
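// Initialize the result set of a join of 'a' and 'b'; each selected column inherits
// its type from whichever operand defines it.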
void CudaSet::initialize(CudaSet* a, CudaSet* b, int_type Recs, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = Recs;
mColumnCount = op_sel.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
maxRecs = b->maxRecs;
map<string,int>::iterator it;
seq = 0;
unsigned int i = 0;
segCount = 1;
not_compressed = 1;
col_aliases = op_sel_as;
queue<string> names(op_sel);
while(!names.empty()) {
columnNames[names.front()] = i;
names.pop();
i++;
};
unsigned int index;
for(unsigned int i=0; i < mColumnCount; i++) {
if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
index = it->second;
cols[i] = i;
            decimal[i] = a->decimal[index];
if ((a->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(a->char_size[a->type_index[index]]);
prealloc_char_size = 0;
};
}
else {
it = b->columnNames.find(op_sel.front());
index = it->second;
cols[i] = i;
decimal[i] = b->decimal[index];
if ((b->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type>>());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((b->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type>>());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(b->char_size[b->type_index[index]]);
prealloc_char_size = 0;
};
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 5;
else if (op_type == 1) // <
return 6;
else if (op_type == 6) // >=
return 1;
else if (op_type == 5) // <=
return 2;
else return op_type;
}
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
} ;
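// Make sure the columns named in 'fields' have device storage; for filtered sets a single
// scratch buffer large enough for the biggest column segment is (re)allocated instead.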
void allocColumns(CudaSet* a, queue<string> fields)
{
if(!a->prm.empty()) {
unsigned int max_sz = max_tmp(a) ;
CudaSet* t = varNames[setMap[fields.front()]];
if(max_sz*t->maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, max_sz*t->maxRecs);
alloced_sz = max_sz*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
unsigned int idx = a->columnNames[fields.front()];
bool onDevice = 0;
if(a->type[idx] == 0) {
if(a->d_columns_int[a->type_index[idx]].size() > 0) {
onDevice = 1;
}
}
else if(a->type[idx] == 1) {
if(a->d_columns_float[a->type_index[idx]].size() > 0) {
onDevice = 1;
};
}
else {
if((a->d_columns_char[a->type_index[idx]]) != NULL) {
onDevice = 1;
};
};
if (!onDevice) {
if(a->prm.empty()) {
a->allocColumnOnDevice(idx, a->maxRecs);
}
else {
a->allocColumnOnDevice(idx, largest_prm(a));
};
};
};
fields.pop();
};
};
}
unsigned int largest_prm(CudaSet* a)
{
unsigned int maxx = 0;
for(unsigned int i = 0; i < a->prm_count.size(); i++)
if(maxx < a->prm_count[i])
maxx = a->prm_count[i];
if(maxx == 0)
maxx = a->maxRecs;
return maxx;
};
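// Gather one segment of column 'field' from source table 't' into 'a', either through the
// segment's permutation ('R') or by copying it straight through when the whole segment qualifies.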
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, unsigned int& count)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
//find the largest possible size of a gathered segment
if(!a->onDevice(idx)) {
unsigned int max_count = 0;
for(unsigned int i = 0; i < a->prm.size(); i++)
if (a->prm_count[i] > max_count)
max_count = a->prm_count[i];
a->allocColumnOnDevice(idx, max_count);
};
unsigned int g_size = a->prm_count[segment];
if(a->prm_index[segment] == 'R') {
if(a->prm_d.size() == 0) // find the largest prm segment
a->prm_d.resize(largest_prm(a));
if(curr_segment != segment) {
cudaMemcpy((void**)(thrust::raw_pointer_cast(a->prm_d.data())), (void**)a->prm[segment],
4*g_size, cudaMemcpyHostToDevice);
curr_segment = segment;
};
mygather(tindex, idx, a, t, count, g_size);
}
else {
mycopy(tindex, idx, a, t, count, g_size);
};
a->mRecCount = g_size;
}
unsigned int getSegmentRecCount(CudaSet* a, unsigned int segment) {
if (segment == a->segCount-1) {
return oldCount - a->maxRecs*segment;
}
else
return a->maxRecs;
}
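// Bring every column in 'fields' for the given segment onto the device, gathering through
// the filter permutation when the set has one.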
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, unsigned int& count)
{
set<string> uniques;
CudaSet *t;
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
if(!a->prm.empty()) {
t = varNames[setMap[fields.front()]];
if(a->prm_count[segment]) {
alloced_switch = 1;
//cout << "copy " << fields.front() << " " << alloced_switch << endl;
t->CopyColumnToGpu(t->columnNames[fields.front()], segment); // segment i
//cout << "gather " << fields.front() << endl;
gatherColumns(a, t, fields.front(), segment, count);
//cout << "end " << endl;
alloced_switch = 0;
}
else
a->mRecCount = 0;
}
else {
a->CopyColumnToGpu(a->columnNames[fields.front()], segment); // segment i
};
uniques.insert(fields.front());
};
fields.pop();
};
}
void setPrm(CudaSet* a, CudaSet* b, char val, unsigned int segment) {
b->prm.push_back(NULL);
b->prm_index.push_back(val);
if (val == 'A') {
b->mRecCount = b->mRecCount + getSegmentRecCount(a,segment);
b->prm_count.push_back(getSegmentRecCount(a, segment));
}
else {
b->prm_count.push_back(0);
};
}
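// mygather()/mycopy() move one segment of a column from 't' into 'a' at 'offset',
// reading either from t's device column or from the shared scratch buffer (alloced_switch).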
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
(void*)t->d_columns_char[t->type_index[tindex]], (void*)a->d_columns_char[a->type_index[idx]], a->char_size[a->type_index[idx]] );
}
else {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
alloced_tmp, (void*)a->d_columns_char[a->type_index[idx]], a->char_size[a->type_index[idx]] );
};
}
};
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, unsigned int offset, unsigned int g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].begin() + g_size,
a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].begin() + g_size,
a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
cudaMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (void**)t->d_columns_char[t->type_index[tindex]],
g_size*t->char_size[t->type_index[tindex]], cudaMemcpyDeviceToDevice);
}
else {
cudaMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), alloced_tmp,
g_size*t->char_size[t->type_index[tindex]], cudaMemcpyDeviceToDevice);
};
};
};
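// Copy the right-hand table's join columns onto the device (gathering filtered segments)
// and return the number of records loaded.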
unsigned int load_queue(queue<string> c1, CudaSet* right, bool str_join, string f2, unsigned int &rcount)
{
queue<string> cc;
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() || str_join) {
cc.push(c1.front());
};
};
c1.pop();
};
if(!str_join) {
cc.push(f2);
};
unsigned int cnt_r = 0;
if(!right->prm.empty()) {
allocColumns(right, cc);
rcount = std::accumulate(right->prm_count.begin(), right->prm_count.end(), 0 );
}
else
rcount = right->mRecCount;
queue<string> ct(cc);
reset_offsets();
while(!ct.empty()) {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
ct.pop();
};
ct = cc;
if(right->prm.empty()) {
//copy all records
while(!ct.empty()) {
right->CopyColumnToGpu(right->columnNames[ct.front()]);
ct.pop();
};
cnt_r = right->mRecCount;
}
else {
//copy and gather all records
for(unsigned int i = 0; i < right->segCount; i++) {
copyColumns(right, cc, i, cnt_r);
cnt_r = cnt_r + right->prm_count[i];
};
};
return cnt_r;
}
unsigned int max_char(CudaSet* a)
{
unsigned int max_char = 0;
for(unsigned int i = 0; i < a->char_size.size(); i++)
if (a->char_size[i] > max_char)
max_char = a->char_size[i];
return max_char;
};
unsigned int max_char(CudaSet* a, set<string> field_names)
{
unsigned int max_char = 0;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
int i = a->columnNames[*it];
if (a->type[i] == 2) {
if (a->char_size[a->type_index[i]] > max_char)
                max_char = a->char_size[a->type_index[i]];
};
};
return max_char;
};
unsigned int max_tmp(CudaSet* a)
{
unsigned int max_sz = 0;
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[i] == 0) {
if(int_size > max_sz)
max_sz = int_size;
}
else if(a->type[i] == 1) {
if(float_size > max_sz)
max_sz = float_size;
};
};
unsigned int m_char = max_char(a);
if(m_char > max_sz)
return m_char;
else
return max_sz;
};
void reset_offsets() {
map<unsigned int, unsigned int>::iterator iter;
for (iter = str_offset.begin(); iter != str_offset.end(); ++iter) {
iter->second = 0;
};
};
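// Estimate the per-record footprint of 'cols' and split the set into enough segments
// that one segment fits in half of the free GPU memory.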
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
unsigned int tot_sz = 0, idx;
while(!cols.empty()) {
idx = a->columnNames[cols.front()];
if(a->type[idx] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[a->type_index[idx]];
cols.pop();
};
cout << "tot " << tot_sz << endl;
if(a->mRecCount*tot_sz > mem_available/2) {
a->segCount = (a->mRecCount*tot_sz)/(mem_available/2) + 1;
};
};
|
5fe65a1e0929d28eb621c1a9f20ea075b152eb46.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Compute_weightx_weighty1_norm2_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *weightx = NULL;
hipMalloc(&weightx, XSIZE*YSIZE);
float *weighty = NULL;
hipMalloc(&weighty, XSIZE*YSIZE);
const float *psi = NULL;
hipMalloc(&psi, XSIZE*YSIZE);
const float *phi = NULL;
hipMalloc(&phi, XSIZE*YSIZE);
const float *absIx = NULL;
hipMalloc(&absIx, XSIZE*YSIZE);
const float *absIy = NULL;
hipMalloc(&absIy, XSIZE*YSIZE);
int nPixels = 1;
float norm_for_smooth_term = XSIZE*YSIZE;
float eps = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
Compute_weightx_weighty1_norm2_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weightx,weighty,psi,phi,absIx,absIy,nPixels,norm_for_smooth_term,eps);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Compute_weightx_weighty1_norm2_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weightx,weighty,psi,phi,absIx,absIy,nPixels,norm_for_smooth_term,eps);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Compute_weightx_weighty1_norm2_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weightx,weighty,psi,phi,absIx,absIy,nPixels,norm_for_smooth_term,eps);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5fe65a1e0929d28eb621c1a9f20ea075b152eb46.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Compute_weightx_weighty1_norm2_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *weightx = NULL;
cudaMalloc(&weightx, XSIZE*YSIZE);
float *weighty = NULL;
cudaMalloc(&weighty, XSIZE*YSIZE);
const float *psi = NULL;
cudaMalloc(&psi, XSIZE*YSIZE);
const float *phi = NULL;
cudaMalloc(&phi, XSIZE*YSIZE);
const float *absIx = NULL;
cudaMalloc(&absIx, XSIZE*YSIZE);
const float *absIy = NULL;
cudaMalloc(&absIy, XSIZE*YSIZE);
int nPixels = 1;
float norm_for_smooth_term = XSIZE*YSIZE;
float eps = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Compute_weightx_weighty1_norm2_Kernel<<<gridBlock,threadBlock>>>(weightx,weighty,psi,phi,absIx,absIy,nPixels,norm_for_smooth_term,eps);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Compute_weightx_weighty1_norm2_Kernel<<<gridBlock,threadBlock>>>(weightx,weighty,psi,phi,absIx,absIy,nPixels,norm_for_smooth_term,eps);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Compute_weightx_weighty1_norm2_Kernel<<<gridBlock,threadBlock>>>(weightx,weighty,psi,phi,absIx,absIy,nPixels,norm_for_smooth_term,eps);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
94e5bfd6ac9431e91db4218a604b4f99a93f1be7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel/gpu/cuda_impl/adam_impl.cuh"
template <typename T>
__device__ __forceinline__ T SqrtFunc(T input) {
return sqrt(input);
}
template <>
__device__ __forceinline__ half SqrtFunc(half input) {
return hsqrt(input);
}
template <typename T>
__global__ void ApplyAdamKernel(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power,
const T *learning_rate, const T *beta1, const T *beta2, const T *epsilon, T *variable,
T *m, T *v) {
const T one = static_cast<T>(1.0);
const T new_learning_rate = learning_rate[0] * SqrtFunc(one - beta2_power[0]) / (one - beta1_power[0]);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] += (gradient[i] - m[i]) * (one - beta1[0]);
v[i] += (gradient[i] * gradient[i] - v[i]) * (one - beta2[0]);
variable[i] -= new_learning_rate * m[i] / (SqrtFunc(v[i]) + epsilon[0]);
}
}
template <typename T>
void ApplyAdam(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power, const T *learning_rate,
const T *beta1, const T *beta2, const T *epsilon, T *variable, T *m, T *v, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ApplyAdamKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream,
size, gradient, beta1_power, beta2_power, learning_rate, beta1, beta2, epsilon, variable, m, v);
}
template void ApplyAdam<float>(const size_t size, const float *gradient, const float *beta1_power,
const float *beta2_power, const float *learning_rate, const float *beta1,
const float *beta2, const float *epsilon, float *variable, float *m, float *v,
hipStream_t cuda_stream);
template void ApplyAdam<half>(const size_t size, const half *gradient, const half *beta1_power, const half *beta2_power,
const half *learning_rate, const half *beta1, const half *beta2, const half *epsilon,
half *variable, half *m, half *v, hipStream_t cuda_stream);
|
94e5bfd6ac9431e91db4218a604b4f99a93f1be7.cu
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel/gpu/cuda_impl/adam_impl.cuh"
template <typename T>
__device__ __forceinline__ T SqrtFunc(T input) {
return sqrt(input);
}
template <>
__device__ __forceinline__ half SqrtFunc(half input) {
return hsqrt(input);
}
template <typename T>
__global__ void ApplyAdamKernel(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power,
const T *learning_rate, const T *beta1, const T *beta2, const T *epsilon, T *variable,
T *m, T *v) {
const T one = static_cast<T>(1.0);
const T new_learning_rate = learning_rate[0] * SqrtFunc(one - beta2_power[0]) / (one - beta1_power[0]);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
m[i] += (gradient[i] - m[i]) * (one - beta1[0]);
v[i] += (gradient[i] * gradient[i] - v[i]) * (one - beta2[0]);
variable[i] -= new_learning_rate * m[i] / (SqrtFunc(v[i]) + epsilon[0]);
}
}
template <typename T>
void ApplyAdam(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power, const T *learning_rate,
const T *beta1, const T *beta2, const T *epsilon, T *variable, T *m, T *v, cudaStream_t cuda_stream) {
ApplyAdamKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(
size, gradient, beta1_power, beta2_power, learning_rate, beta1, beta2, epsilon, variable, m, v);
}
template void ApplyAdam<float>(const size_t size, const float *gradient, const float *beta1_power,
const float *beta2_power, const float *learning_rate, const float *beta1,
const float *beta2, const float *epsilon, float *variable, float *m, float *v,
cudaStream_t cuda_stream);
template void ApplyAdam<half>(const size_t size, const half *gradient, const half *beta1_power, const half *beta2_power,
const half *learning_rate, const half *beta1, const half *beta2, const half *epsilon,
half *variable, half *m, half *v, cudaStream_t cuda_stream);
|
e6f5f9efb6939c5882f32211fac5488705530d50.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
void initStart(const int npart, const float temp, float** positions_ptr, float** ppositions_ptr, float** velocities_ptr, float** f_ptr, const float dt, const float *box) {
int i,j,k,counter;
float* sumv = (float*)calloc(3,sizeof(float));
float sumv2 = 0;
int dimensions = (int)ceil(cbrtf(npart));
float dx = box[0]/dimensions;
float dy = box[1]/dimensions;
float dz = box[2]/dimensions;
*positions_ptr = (float*) calloc(3*npart,sizeof(float));
*velocities_ptr = (float*) calloc(3*npart,sizeof(float));
*ppositions_ptr = (float*) calloc(3*npart,sizeof(float));
*f_ptr = (float*) calloc(3*npart,sizeof(float));
float* positions = *positions_ptr;
float* velocities = *velocities_ptr;
float* ppositions = *ppositions_ptr;
fprintf(stderr,"dimensions: %d dx: %f\n",dimensions,dx);
//srand(time(NULL));
srand(42);
for (i = 0; i < npart; i++) {
for(j = 0; j < 3; j++){
//positions[i*3 + j] = (rand()/(float)RAND_MAX)*box;
velocities[i*3 + j] = (rand()/(float)RAND_MAX - 0.5);
sumv[j] += velocities[i*3+j];
sumv2 += velocities[i*3+j]*velocities[i*3+j];
}
}
counter=0;
for (i=0; i<dimensions; i++)
for(j=0; j<dimensions; j++)
for(k=0; k<dimensions; k++) {
if(counter == npart) {
j=dimensions;
i=dimensions;
break;
}
positions[(dimensions*dimensions*i+dimensions*j+k)*3] = k*dx +dx/2 -box[0]/2;
positions[(dimensions*dimensions*i+dimensions*j+k)*3+1] = j*dy +dy/2 -box[1]/2;
positions[(dimensions*dimensions*i+dimensions*j+k)*3+2] = i*dz +dz/2 -box[2]/2;
counter++;
//fprintf(stderr,"counter: %d i= %d j=%d k=%d\n",counter,i,j,k);
}
sumv[0] /= npart;
sumv[1] /= npart;
sumv[2] /= npart;
sumv2 /= npart;
float vCMS2 = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
fprintf(stdout,"Velocity Center of mass: %f frame 0\n",vCMS2);
float fs = sqrt(3*temp/sumv2);
for (i = 0; i < npart; i++) {
for(j = 0; j < 3; j++) {
velocities[i*3 + j] -= sumv[j];
velocities[i*3 + j] *= fs;
ppositions[i*3 + j] = positions[i*3 + j] - velocities[i*3 + j]*dt;
}
}
free(sumv);
}
void initContinued(int *npart, float** positions_ptr, float** velocities_ptr, float** f_ptr, FILE* xin, FILE* vin) {
int i;
srand(420);
fscanf(xin,"%d\n",npart); //read number of particles
int npartCheck;
fscanf(vin,"%d\n",&npartCheck);
if(npartCheck != *npart) {
fprintf(stdout, "Error: Input files incompatible!\n");
exit(-1);
}
fprintf(stdout,"Particles: %d\n",*npart);
*positions_ptr = (float*) calloc(3*(*npart),sizeof(float));
*velocities_ptr = (float*) calloc(3*(*npart),sizeof(float));
*f_ptr = (float*) calloc(3*(*npart),sizeof(float));
float* positions = *positions_ptr;
float* velocities = *velocities_ptr;
//skip second line
fscanf(xin, "%*[^\n]\n", NULL);
fscanf(vin, "%*[^\n]\n", NULL);
for(i=0; i<*npart; i++) {
fscanf(xin, "atom %f %f %f\n",&positions[3*i],&positions[3*i+1],&positions[3*i+2]);
fscanf(vin, "atom %f %f %f\n",&velocities[3*i], &velocities[3*i+1], &velocities[3*i+2]);
//fprintf(stderr,"i: %d\n",i);
}
}
void initPreset(float temp, int *npart, float** positions_ptr, float** velocities_ptr, float** f_ptr, FILE* xin) {
int i,j;
float* sumv = (float*)calloc(3,sizeof(float));
float sumv2 = 0;
srand(421);
fscanf(xin,"%d\n",npart); //read number of particles
*positions_ptr = (float*) calloc(3*(*npart),sizeof(float));
*velocities_ptr = (float*) calloc(3*(*npart),sizeof(float));
*f_ptr = (float*) calloc(3*(*npart),sizeof(float));
float* positions = *positions_ptr;
float* velocities = *velocities_ptr;
//skip second line
fscanf(xin, "%*[^\n]\n", NULL);
for(i=0; i<*npart; i++)
fscanf(xin, "atom %f %f %f\n",&positions[3*i],&positions[3*i+1],&positions[3*i+2]);
for (i = 0; i < *npart; i++) {
for(j = 0; j < 3; j++){
velocities[i*3 + j] = (rand()/(float)RAND_MAX - 0.5);
sumv[j] += velocities[i*3+j];
sumv2 += velocities[i*3+j]*velocities[i*3+j];
}
}
sumv[0] /= *npart;
sumv[1] /= *npart;
sumv[2] /= *npart;
sumv2 /= *npart;
float vCMS2 = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
fprintf(stdout,"Velocity Center of mass: %f frame 0\n",vCMS2);
float fs = sqrt(3*temp/sumv2);
for (i = 0; i < *npart; i++) {
for(j = 0; j < 3; j++) {
velocities[i*3 + j] -= sumv[j];
velocities[i*3 + j] *= fs;
}
}
free(sumv);
}
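// Draw a normally distributed random number with the given sigma and mean (polar Box-Muller).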
float gauss(float sigma, float mean) {
float r =2.0;
float v1,v2;
while( r >= 1) {
v1=2*(rand()/(float)RAND_MAX)-1;
v2=2*(rand()/(float)RAND_MAX)-1;
r=v1*v1+v2*v2;
}
float l= v1*sqrt(-2*log(r)/r);
return mean+sigma*l;
}
#ifdef CUDA
void errorHandler (hipError_t error, int line){
if(error != hipSuccess)
{
// print the CUDA error message and exit
fprintf(stderr,"CUDA error: %s in line number %d\n", hipGetErrorString(error),line);
exit(-1);
}
}
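// One thread per particle: accumulates Lennard-Jones forces over all other particles using
// the minimum-image convention, plus per-particle energy and surface-tension contributions.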
__global__ void particleForce(float* f, const float* positions, const int npart, const float boxX, const float boxY, const float boxZ, const float cutoff2, const int n, const float ecut, float* energies,float* SurfaceTensions) {
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
if(threadId >= npart)
return;
int j;
float rx;
float ry;
float rz;
float r2;
float r2i;
float r6i;
float ff; //lennard-jones potential
f[3*threadId] = 0;
f[3*threadId+1] =0;
f[3*threadId+2] =0;
energies[threadId] =0;
SurfaceTensions[threadId] =0;
for(j=0; j<npart; j++) {
if(j==threadId) continue;
rx = positions[3*threadId] - positions[3*j];
ry = positions[3*threadId+1] - positions[3*j+1];
rz = positions[3*threadId+2] - positions[3*j+2];
rx -= boxX* round(rx/boxX);
ry -= boxY* round(ry/boxY);
rz -= boxZ* round(rz/boxZ);
r2 = rx*rx + ry*ry + rz*rz;
if(r2 < 0.1) printf("Not good! i %d j %d distance %f frame %d\n",threadId,j,r2,n);
if(r2 < cutoff2) {
r2i = 1/r2;
r6i = r2i*r2i*r2i;
ff = 48*r2i*r6i*(r6i - 0.5);
f[3*threadId] += ff*rx;
f[3*threadId+1] += ff*ry;
f[3*threadId+2] += ff*rz;
if(j>threadId) {
//ff = abs(ff);
energies[threadId] += 4*r6i*(r6i-1) - ecut;
SurfaceTensions[threadId] += ff * (rx*rx + ry*ry - 2 * rz * rz);
}
}
}
}
void calcForce(float *st,float* en, float* f, const float* positions, const int npart, const float *box, const float cutoff2, const int n, const float ecut) {
float *devPtr_forces;
float *devPtr_positions;
float *devPtr_energies;
float *devPtr_SurfaceTensions;
int i;
float *energies = (float*)calloc(npart,sizeof(float));
float *SurfaceTensions = (float*)calloc(npart,sizeof(float));
errorHandler(hipMalloc((void**)&devPtr_forces, 3*npart* sizeof(float)),__LINE__);
errorHandler(hipMalloc((void**)&devPtr_positions, 3*npart* sizeof(float)),__LINE__);
errorHandler(hipMalloc((void**)&devPtr_energies, npart*sizeof(float)),__LINE__);
errorHandler(hipMalloc((void**)&devPtr_SurfaceTensions, npart*sizeof(float)),__LINE__);
//errorHandler(hipMemcpy(devPtr_forces, f, 3*npart * sizeof(float), hipMemcpyHostToDevice),__LINE__);
errorHandler(hipMemcpy(devPtr_positions, positions, 3*npart * sizeof(float), hipMemcpyHostToDevice),__LINE__);
int threadsPerBlock = 512;
int blocks = npart/threadsPerBlock + 1;
//fprintf(stderr,"starting GPU calc\n");
hipLaunchKernelGGL(( particleForce), dim3(blocks), dim3(threadsPerBlock), 0, 0, devPtr_forces, devPtr_positions, npart, box[0], box[1], box[2], cutoff2, n,ecut, devPtr_energies,devPtr_SurfaceTensions);
errorHandler( hipPeekAtLastError(),__LINE__);
errorHandler(hipMemcpy(f, devPtr_forces, 3*npart * sizeof(float), hipMemcpyDeviceToHost),__LINE__);
errorHandler(hipMemcpy(energies,devPtr_energies, npart*sizeof(float), hipMemcpyDeviceToHost),__LINE__);
errorHandler(hipMemcpy(SurfaceTensions,devPtr_SurfaceTensions, npart*sizeof(float), hipMemcpyDeviceToHost),__LINE__);
errorHandler(hipFree(devPtr_forces),__LINE__);
errorHandler(hipFree(devPtr_positions),__LINE__);
errorHandler(hipFree(devPtr_energies),__LINE__);
errorHandler(hipFree(devPtr_SurfaceTensions),__LINE__);
*st = 0;
*en =0;
for(i=0; i<npart; i++) {
*en += energies[i];
*st += SurfaceTensions[i];
}
*st /= 4*box[0]*box[1];
//*st = abs(*st);
free(energies);
free(SurfaceTensions);
}
/*__global__ void integrateParticle1(const int npart, const float* box, const float* f, float* positions, float* velocities, const float dt) {
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
if(threadId >= npart)
return;
int j;
for( j=0; j<3; j++) {
positions[3*threadId+j] += dt* velocities[3*threadId+j] +dt*dt* f[3*threadId+j]/2;
//apply periodic boundary conditions
if(positions[3*threadId+j] > box[j]/2 || positions[3*threadId+j] < -box[j]/2) {
positions[3*threadId+j] -= round(positions[3*threadId+j]/box[j])*box[j];
}
velocities[3*threadId+j] += dt*f[3*threadId+j]/2;
}
}
__global__ void integrateParticle2(const int npart, const float* f, float* velocities, const float dt) {
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
if(threadId >= npart)
return;
int j;
for( j=0; j<3; j++) {
velocities[3*threadId+j] += dt*f[3*threadId+j]/2;
}
}
void integrateVelVerlet(const float en, const float* box, const int part, const float* f, const int npart, float* positions, float* velocities, const float dt, const float temp_target, const float nu, const int frame) {
int i,j;
if(part ==1) {
float *devPtr_forces;
float *devPtr_positions;
float *devPtr_velocities;
float *devPtr_box;
errorHandler(hipMalloc((void**)&devPtr_forces, 3*npart* sizeof(float)),__LINE__);
errorHandler(hipMalloc((void**)&devPtr_positions, 3*npart* sizeof(float)),__LINE__);
errorHandler(hipMalloc((void**)&devPtr_velocities, 3*npart*sizeof(float)),__LINE__);
errorHandler(hipMalloc((void**)&devPtr_box, 3*sizeof(float)),__LINE__);
errorHandler(hipMemcpy(devPtr_forces, f, 3*npart * sizeof(float), hipMemcpyHostToDevice),__LINE__);
errorHandler(hipMemcpy(devPtr_positions, positions, 3*npart * sizeof(float), hipMemcpyHostToDevice),__LINE__);
errorHandler(hipMemcpy(devPtr_velocities, velocities, 3*npart *sizeof(float), hipMemcpyHostToDevice),__LINE__);
errorHandler(hipMemcpy(devPtr_box, box, 3*sizeof(float), hipMemcpyHostToDevice),__LINE__);
int threadsPerBlock = 512;
int blocks = npart/threadsPerBlock + 1;
//fprintf(stderr,"starting GPU calc\n");
integrateParticle1<<<blocks, threadsPerBlock>>>(npart, devPtr_box, devPtr_forces, devPtr_positions, devPtr_velocities, dt);
errorHandler( hipPeekAtLastError(),__LINE__);
errorHandler(hipMemcpy(positions, devPtr_positions, 3*npart * sizeof(float), hipMemcpyDeviceToHost),__LINE__);
errorHandler(hipMemcpy(velocities, devPtr_velocities, 3*npart * sizeof(float), hipMemcpyDeviceToHost),__LINE__);
errorHandler(hipFree(devPtr_forces),__LINE__);
errorHandler(hipFree(devPtr_positions),__LINE__);
errorHandler(hipFree(devPtr_velocities),__LINE__);
errorHandler(hipFree(devPtr_box),__LINE__);
}
if(part ==2) {
float *devPtr_forces;
float *devPtr_velocities;
errorHandler(hipMalloc((void**)&devPtr_forces, 3*npart* sizeof(float)),__LINE__);
errorHandler(hipMalloc((void**)&devPtr_velocities, 3*npart*sizeof(float)),__LINE__);
errorHandler(hipMemcpy(devPtr_forces, f, 3*npart * sizeof(float), hipMemcpyHostToDevice),__LINE__);
errorHandler(hipMemcpy(devPtr_velocities, velocities, 3*npart *sizeof(float), hipMemcpyHostToDevice),__LINE__);
int threadsPerBlock = 512;
int blocks = npart/threadsPerBlock + 1;
//fprintf(stderr,"starting GPU calc\n");
integrateParticle2<<<blocks, threadsPerBlock>>>(npart, devPtr_forces, devPtr_velocities, dt);
errorHandler( hipPeekAtLastError(),__LINE__);
errorHandler(hipMemcpy(velocities, devPtr_velocities, 3*npart * sizeof(float), hipMemcpyDeviceToHost),__LINE__);
errorHandler(hipFree(devPtr_forces),__LINE__);
errorHandler(hipFree(devPtr_velocities),__LINE__);
//temp_current /= 3*npart;
float sigma = sqrt(temp_target);
//float* sumv = (float*)calloc(3,sizeof(float));
if(fabs(nu) > 0.001) {
for(i=0; i<npart; i++) {
if(rand()/(float)RAND_MAX < nu*dt) {
for(j=0; j<3; j++) {
velocities[3*i+j] = gauss(sigma,0);
}
}
//sumv[0] += velocities[3*i];
//sumv[1] += velocities[3*i+1];
//sumv[2] += velocities[3*i+2];
}
}
//sumv[0] /=npart;
//sumv[1] /=npart;
//sumv[2] /=npart;
//float sumvSquared = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
//float etot = (en + 0.5*temp_current)/npart;
//fprintf(stdout,"total Vel CMS: %f VelX: %8.5f VelY %8.5f VelZ %8.5f Temp: %f Energy: %f frame: %d\n",sumvSquared, sumv[0], sumv[1], sumv[2], temp_current, etot, frame);
//fprintf(stdout,"Temp: %f Energy: %f frame: %d\n", temp_current, etot, frame);
//fprintf(stdout,"Velocity Center of mass: %f frame: %d\n",sumv2, frame);
}
}*/
#else
void calcForce(float* en,float* f, const float* positions, const int npart, const float *box, const float cutoff2, const int n, const float ecut) {
int i,j;
*en = 0;
for(i=0; i <3*npart; i++) {
f[i] = 0;
}
float rx;
float ry;
float rz;
float r2;
float r2i;
float r6i;
float ff; //lennard-jones potential
for(i=0; i < npart-1; i++) {
for(j=i+1; j<npart; j++) {
rx = positions[3*i] - positions[3*j];
ry = positions[3*i+1] - positions[3*j+1];
rz = positions[3*i+2] - positions[3*j+2];
rx -= box[0]* round(rx/box[0]);
ry -= box[1]* round(ry/box[1]);
rz -= box[2]* round(rz/box[2]);
r2 = rx*rx + ry*ry + rz*rz;
if(r2 < 0.1) fprintf(stderr,"Not good! i %d j %d distance %f frame %d\n",i,j,r2,n);
if(r2 < cutoff2) {
r2i = 1/r2;
r6i = r2i*r2i*r2i;
ff = 48*r2i*r6i*(r6i - 0.5);
f[3*i] += ff*rx;
f[3*j] -= ff*rx;
f[3*i+1] += ff*ry;
f[3*j+1] -= ff*ry;
f[3*i+2] += ff*rz;
f[3*j+2] -= ff*rz;
*en += 4*r6i*(r6i-1) - ecut;
}
}
}
}
/*void integrateVelVerlet(const float en, const float* box, const int part, const float* f, const int npart, float* positions, float* velocities, const float dt, const float temp_target, const float nu, const int frame) {
int i,j;
if(part ==1) {
for(i=0; i<npart; i++)
for(j=0; j<3; j++) {
positions[3*i+j] += dt* velocities[3*i+j] +dt*dt* f[3*i+j]/2; //update position
//apply periodic boundary conditions
if(positions[3*i+j] > box[j]/2 || positions[3*i+j] < -box[j]/2) {
//float pposition = positions[3*i+j];
//positions[3*i+j] -= floor(positions[3*i+j]/box[j])*box[j];
positions[3*i+j] -= round(positions[3*i+j]/box[j])*box[j];
//fprintf(stdout,"Out of box positive particle %d in frame %d. Calculated position %f, corrected position %f\n",i,frame,pposition, positions[3*i+j]);
}
velocities[3*i+j] += dt*f[3*i+j]/2;
}
}
if(part ==2) {
float temp_current =0;
for(i=0; i<npart; i++) {
for(j=0; j<3; j++){
velocities[3*i+j] = velocities[3*i+j] +dt* f[3*i+j]/2;
temp_current += velocities[3*i+j] *velocities[3*i+j];
}
}
temp_current /= 3*npart;
float sigma = sqrt(temp_target);
//float* sumv = (float*)calloc(3,sizeof(float));
if(fabs(nu) > 0.001) {
for(i=0; i<npart; i++) {
if(rand()/(float)RAND_MAX < nu*dt) {
for(j=0; j<3; j++) {
velocities[3*i+j] = gauss(sigma,0);
}
}
//sumv[0] += velocities[3*i];
//sumv[1] += velocities[3*i+1];
//sumv[2] += velocities[3*i+2];
}
}
//sumv[0] /=npart;
//sumv[1] /=npart;
//sumv[2] /=npart;
//float sumvSquared = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
float etot = (en + 0.5*temp_current)/npart;
//fprintf(stdout,"total Vel CMS: %f VelX: %8.5f VelY %8.5f VelZ %8.5f Temp: %f Energy: %f frame: %d\n",sumvSquared, sumv[0], sumv[1], sumv[2], temp_current, etot, frame);
fprintf(stdout,"Temp: %f Energy: %f frame: %d\n", temp_current, etot, frame);
//fprintf(stdout,"Velocity Center of mass: %f frame: %d\n",sumv2, frame);
}
}*/
#endif
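//velocity-Verlet integrator split in two stages: part 1 updates positions (with periodic wrapping) and applies the first half-kick;
//part 2 applies the second half-kick, accumulates the kinetic temperature and, with probability nu*dt per particle, resamples
//velocities from a Gaussian at the target temperature (Andersen-style thermostat); diagnostics are printed every sampleStep frames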
void integrateVelVerlet(const int sampleStep, const float surfaceTension, const float en, const float* box, const int part, const float* f, const int npart, float* positions, float* velocities, const float dt, const float temp_target, const float nu, const int frame) {
int i,j;
if(part ==1) {
for(i=0; i<npart; i++)
for(j=0; j<3; j++) {
positions[3*i+j] += dt* velocities[3*i+j] +dt*dt* f[3*i+j]/2; //update position
//apply periodic boundary conditions
if(positions[3*i+j] > box[j]/2 || positions[3*i+j] < -box[j]/2) {
//float pposition = positions[3*i+j];
//positions[3*i+j] -= floor(positions[3*i+j]/box[j])*box[j];
positions[3*i+j] -= round(positions[3*i+j]/box[j])*box[j];
//fprintf(stdout,"Out of box positive particle %d in frame %d. Calculated position %f, corrected position %f\n",i,frame,pposition, positions[3*i+j]);
}
velocities[3*i+j] += dt*f[3*i+j]/2;
}
}
if(part ==2) {
float temp_current =0;
for(i=0; i<npart; i++) {
for(j=0; j<3; j++){
velocities[3*i+j] = velocities[3*i+j] +dt* f[3*i+j]/2;
temp_current += velocities[3*i+j] *velocities[3*i+j];
}
}
temp_current /= 3*npart;
float sigma = sqrt(temp_target);
//float* sumv = (float*)calloc(3,sizeof(float));
if(fabs(nu) > 0.001) {
for(i=0; i<npart; i++) {
if(rand()/(float)RAND_MAX < nu*dt) {
for(j=0; j<3; j++) {
velocities[3*i+j] = gauss(sigma,0);
}
}
//sumv[0] += velocities[3*i];
//sumv[1] += velocities[3*i+1];
//sumv[2] += velocities[3*i+2];
}
}
//sumv[0] /=npart;
//sumv[1] /=npart;
//sumv[2] /=npart;
//float sumvSquared = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
float etot = (en + 0.5*temp_current)/npart;
//fprintf(stdout,"total Vel CMS: %f VelX: %8.5f VelY %8.5f VelZ %8.5f Temp: %f Energy: %f frame: %d\n",sumvSquared, sumv[0], sumv[1], sumv[2], temp_current, etot, frame);
if(!(frame%sampleStep))
fprintf(stdout,"Temp: %f Energy: %f Surface Tension: %8.5f frame: %d\n", temp_current, etot,surfaceTension, frame);
//fprintf(stdout,"Velocity Center of mass: %f frame: %d\n",sumv2, frame);
}
}
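//basic (position) Verlet integrator: advances positions from the previous positions, derives velocities by central difference,
//and reports the centre-of-mass velocity, temperature and total energy per particle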
void integrateVerlet(const float *box, const float* f, const float en, const int npart, float* positions, float* ppositions, float* velocities, const float dt, const int frame) {
int i,j;
float* sumv = (float*) calloc(3,sizeof(float));
float sumv2 = 0;
float xx = 0;
for(i=0; i<npart; i++) {
for(j=0; j<3; j++) {
xx = 2*positions[3*i+j] - ppositions[3*i+j] + dt*dt*f[3*i+j];
velocities[3*i+j] = (xx - ppositions[3*i+j])/(2*dt);
sumv[j] += velocities[3*i+j];
sumv2 += velocities[3*i+j]*velocities[3*i+j];
ppositions[3*i+j] = positions[3*i+j];
positions[3*i+j] = xx;
}
}
sumv[0] /=npart;
sumv[1] /=npart;
sumv[2] /=npart;
float sumvSquared = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
float temp = sumv2/(3*npart);
float etot = (en + 0.5*sumv2)/npart;
fprintf(stdout,"Vel CMS: %f Temp: %f Energy: %f frame: %d\n",sumvSquared, temp, etot, frame);
free(sumv);
}
//void sample(FILE* xres, FILE* vres, FILE* fres, int npart, float* positions, float* velocities, float* f) {
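//append one frame of positions and velocities in xyz-style format ("atom x y z" per particle)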
void sample(FILE* xres, FILE* vres, int npart, float* positions, float* velocities) {
int i;
for(i=0; i<npart; i++) {
//fprintf(xres, " %d %8.8f %8.8f %8.8f \n", i,positions[3*i],positions[3*i+1],positions[3*i+2]);
fprintf(xres, " atom %8.8f %8.8f %8.8f \n", positions[3*i],positions[3*i+1],positions[3*i+2]);
}
for(i=0; i<npart; i++) {
//fprintf(vres, " %d %8.8f %8.8f %8.8f \n", i,velocities[3*i],velocities[3*i+1],velocities[3*i+2]);
fprintf(vres, " atom %8.8f %8.8f %8.8f \n",velocities[3*i],velocities[3*i+1],velocities[3*i+2]);
}
}
int main(int argc, char* argv[])
{
int npart,sampleStep;
float temp,dt,tmax,cutoff, cutoff2,nu;
float box[3];
//cutoff for lennard jones
char* inXpath;
char* inVpath;
int thermostat =0;
int continued = 0;
int preset =0;
if(argc==10) {
sscanf(argv[1],"%d",&npart); //units: https://en.wikipedia.org/wiki/Lennard-Jones_potential#Dimensionless_.28reduced.29_units
sscanf(argv[2],"%f",&temp);
sscanf(argv[3],"%f",&dt);
sscanf(argv[4],"%f",&tmax);
sscanf(argv[5],"%d",&sampleStep);
sscanf(argv[6],"%f",&box[0]);
sscanf(argv[7],"%f",&box[1]);
sscanf(argv[8],"%f",&box[2]);
sscanf(argv[9],"%f",&cutoff);
//xpath = argv[9];
//vpath = argv[10];
//endXpath = argv[10];
//endVpath = argv[11];
printf("Input: Particles: %d Temperature: %f timestep: %f Maxtime: %f Samplestep: %d BoxX: %f BoxY: %f BoxZ: %f cutoff: %f\n",npart,temp,dt,tmax, sampleStep, box[0],box[1],box[2],cutoff);
} else if(argc==11) {
sscanf(argv[1],"%d",&npart); //units: https://en.wikipedia.org/wiki/Lennard-Jones_potential#Dimensionless_.28reduced.29_units
sscanf(argv[2],"%f",&temp);
sscanf(argv[3],"%f",&dt);
sscanf(argv[4],"%f",&tmax);
sscanf(argv[5],"%d",&sampleStep);
sscanf(argv[6],"%f",&box[0]);
sscanf(argv[7],"%f",&box[1]);
sscanf(argv[8],"%f",&box[2]);
sscanf(argv[9],"%f",&cutoff);
sscanf(argv[10],"%f",&nu);
thermostat =1;
printf("Input: Particles: %d Temperature: %f timestep: %f Maxtime: %f Samplestep: %d BoxX: %f BoxY: %f BoxZ: %f cutoff: %f collision-probability: %f run with thermostat\n",npart,temp,dt,tmax, sampleStep, box[0],box[1],box[2],cutoff,nu);
} else if(argc==12) {
//https://en.wikipedia.org/wiki/Lennard-Jones_potential#Dimensionless_.28reduced.29_units
sscanf(argv[1],"%f",&temp);
sscanf(argv[2],"%f",&dt);
sscanf(argv[3],"%f",&tmax);
sscanf(argv[4],"%d",&sampleStep);
sscanf(argv[5],"%f",&box[0]);
sscanf(argv[6],"%f",&box[1]);
sscanf(argv[7],"%f",&box[2]);
sscanf(argv[8],"%f",&cutoff);
sscanf(argv[9],"%f",&nu);
inXpath = argv[10];
thermostat =1;
preset =1;
printf("Input: Temperature: %f timestep: %f Maxtime: %f Samplestep: %d BoxX: %f BoxY: %f BoxZ: %f cutoff: %f collision-probability: %f continued run with thermostat reading path: %s\n",temp,dt,tmax, sampleStep, box[0],box[1],box[2],cutoff, nu, inXpath);
} else if(argc==13) {
//sscanf(argv[1],"%d",&npart); //units: https://en.wikipedia.org/wiki/Lennard-Jones_potential#Dimensionless_.28reduced.29_units
sscanf(argv[1],"%f",&temp);
sscanf(argv[2],"%f",&dt);
sscanf(argv[3],"%f",&tmax);
sscanf(argv[4],"%d",&sampleStep);
sscanf(argv[5],"%f",&box[0]);
sscanf(argv[6],"%f",&box[1]);
sscanf(argv[7],"%f",&box[2]);
sscanf(argv[8],"%f",&cutoff);
sscanf(argv[9],"%f",&nu);
inXpath = argv[10];
inVpath = argv[11];
thermostat =1;
continued =1;
printf("Input: Temperature: %f timestep: %f Maxtime: %f Samplestep: %d BoxX: %f BoxY: %f BoxZ: %f cutoff: %f collision-probability: %f continued run with thermostat reading path: %s reading path: %s\n",temp,dt,tmax, sampleStep, box[0],box[1],box[2],cutoff, nu, inXpath, inVpath);
} else {
printf("Syntax: ./MD (*not in continued run* <number of particles>) <temperature> <dt> <tmax> <sampleStep> <boxX> <boxY> <boxZ> <cutoffDistance> (*optional* <collision probability> <Readpath pos> <Readpath vel>/<preset y/n> <Continued y/n>)\n");
exit(-1);
}
cutoff2 = cutoff*cutoff;
FILE* xres = fopen("./pos.xyz","w");
FILE* vres = fopen("./vel.xyz","w");
if(!xres || !vres) {
printf("File not found!");
exit(-1);
}
float en, st;
float *positions =NULL;
float *ppositions = NULL;
float *velocities = NULL;
float *f = NULL;
if(continued) {
FILE* xin = fopen(inXpath,"r");
FILE* vin = fopen(inVpath,"r");
if(!xin || !vin) {
printf("File not found!");
exit(-1);
}
initContinued(&npart, &positions, &velocities, &f,xin,vin);
//fprintf(stderr,"pos %f %f %f\n", positions[0],positions[1],positions[2]);
fclose(xin);
fclose(vin);
} else if(preset) {
FILE* xin = fopen(inXpath,"r");
if(!xin) {
printf("File not found!");
exit(-1);
}
initPreset(temp, &npart, &positions, &velocities, &f,xin);
//fprintf(stderr,"pos %f %f %f\n", positions[0],positions[1],positions[2]);
fclose(xin);
} else {
initStart(npart, temp, &positions, &ppositions, &velocities, &f,dt,box);
}
fprintf(xres,"%d\n",npart);
fprintf(xres,"generated by my MD simulation\n");
fprintf(vres,"%d\n",npart);
fprintf(vres,"generated by my MD simulation\n");
float t = 0;
float cutoffi = 1/cutoff2;
float cutoff6i = cutoffi*cutoffi*cutoffi;
float ecut = 4*cutoff6i*(cutoff6i-1);
//void integrateVelVerlet(const int sampleStep, const float surfaceTension, const float en, const float* box, const int part, const float* f, const int npart, float* positions, float* velocities, const float dt, const float temp_target, const float nu, const int frame)
//calcForce(float *surfaceTension,float* en, float* f, const float* positions, const int npart, const float *box, const float cutoff2, const int n, const float ecut)
if(thermostat) calcForce(&st,&en,f, positions, npart, box, cutoff2,(int)(t/dt), ecut);
sample(xres, vres, npart, positions, velocities);
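	//main MD loop: without a thermostat use plain Verlet; otherwise velocity-Verlet part 1 -> force/energy/surface-tension evaluation -> part 2,
	//sampling the trajectory and rewriting the posEnd/velEnd restart files every sampleStep frames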
while(t < tmax) {
int frame = (int)(t/dt);
if(!thermostat) {
calcForce(&st,&en, f, positions, npart, box, cutoff2,frame, ecut);
integrateVerlet(box, f, en, npart, positions, ppositions, velocities, dt,frame);
t += dt;
sample(xres, vres, npart, positions, velocities);
fprintf(stderr,"Stage %% %f\r",(double)t/tmax*100.0);
} else {
integrateVelVerlet(sampleStep, st ,en,box, 1,f, npart, positions, velocities, dt, temp,nu,frame);
calcForce(&st, &en, f, positions, npart, box, cutoff2,frame, ecut);
integrateVelVerlet(sampleStep,st, en,box, 2,f, npart, positions, velocities, dt, temp,nu,frame);
t += dt;
if(!(frame%sampleStep)) {
sample(xres, vres, npart, positions, velocities);
FILE* xend = fopen("./posEnd.xyz","w");
FILE* vend = fopen("./velEnd.xyz","w");
if(!xend || !vend) {
printf("File not found!");
exit(-1);
}
fprintf(xend,"%d\n",npart);
fprintf(xend,"generated by my MD simulation\n");
fprintf(vend,"%d\n",npart);
fprintf(vend,"generated by my MD simulation\n");
sample(xend, vend, npart, positions, velocities);
fclose(xend);
fclose(vend);
}
fprintf(stderr,"Stage %% %f\r",(double)t/tmax*100.0);
}
}
fclose(xres);
fclose(vres);
free(f);
free(positions);
free(ppositions);
free(velocities);
}
|
e6f5f9efb6939c5882f32211fac5488705530d50.cu
|
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
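//initStart: place npart particles on a simple cubic lattice inside the box, draw random velocities, remove the centre-of-mass
//drift, rescale to the target temperature, and back-fill the previous positions needed by the Verlet integrator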
void initStart(const int npart, const float temp, float** positions_ptr, float** ppositions_ptr, float** velocities_ptr, float** f_ptr, const float dt, const float *box) {
int i,j,k,counter;
float* sumv = (float*)calloc(3,sizeof(float));
float sumv2 = 0;
int dimensions = (int)ceil(cbrtf(npart));
float dx = box[0]/dimensions;
float dy = box[1]/dimensions;
float dz = box[2]/dimensions;
*positions_ptr = (float*) calloc(3*npart,sizeof(float));
*velocities_ptr = (float*) calloc(3*npart,sizeof(float));
*ppositions_ptr = (float*) calloc(3*npart,sizeof(float));
*f_ptr = (float*) calloc(3*npart,sizeof(float));
float* positions = *positions_ptr;
float* velocities = *velocities_ptr;
float* ppositions = *ppositions_ptr;
fprintf(stderr,"dimensions: %d dx: %f\n",dimensions,dx);
//srand(time(NULL));
srand(42);
for (i = 0; i < npart; i++) {
for(j = 0; j < 3; j++){
//positions[i*3 + j] = (rand()/(float)RAND_MAX)*box;
velocities[i*3 + j] = (rand()/(float)RAND_MAX - 0.5);
sumv[j] += velocities[i*3+j];
sumv2 += velocities[i*3+j]*velocities[i*3+j];
}
}
counter=0;
for (i=0; i<dimensions; i++)
for(j=0; j<dimensions; j++)
for(k=0; k<dimensions; k++) {
if(counter == npart) {
j=dimensions;
i=dimensions;
break;
}
positions[(dimensions*dimensions*i+dimensions*j+k)*3] = k*dx +dx/2 -box[0]/2;
positions[(dimensions*dimensions*i+dimensions*j+k)*3+1] = j*dy +dy/2 -box[1]/2;
positions[(dimensions*dimensions*i+dimensions*j+k)*3+2] = i*dz +dz/2 -box[2]/2;
counter++;
//fprintf(stderr,"counter: %d i= %d j=%d k=%d\n",counter,i,j,k);
}
sumv[0] /= npart;
sumv[1] /= npart;
sumv[2] /= npart;
sumv2 /= npart;
float vCMS2 = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
fprintf(stdout,"Velocity Center of mass: %f frame 0\n",vCMS2);
float fs = sqrt(3*temp/sumv2);
for (i = 0; i < npart; i++) {
for(j = 0; j < 3; j++) {
velocities[i*3 + j] -= sumv[j];
velocities[i*3 + j] *= fs;
ppositions[i*3 + j] = positions[i*3 + j] - velocities[i*3 + j]*dt;
}
}
free(sumv);
}
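//initContinued: resume a run from previously written position and velocity xyz files (the first line holds the particle count)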
void initContinued(int *npart, float** positions_ptr, float** velocities_ptr, float** f_ptr, FILE* xin, FILE* vin) {
int i;
srand(420);
fscanf(xin,"%d\n",npart); //read number of particles
int npartCheck;
fscanf(vin,"%d\n",&npartCheck);
if(npartCheck != *npart) {
fprintf(stdout, "Error: Input files incompatible!\n");
exit(-1);
}
fprintf(stdout,"Particles: %d\n",*npart);
*positions_ptr = (float*) calloc(3*(*npart),sizeof(float));
*velocities_ptr = (float*) calloc(3*(*npart),sizeof(float));
*f_ptr = (float*) calloc(3*(*npart),sizeof(float));
float* positions = *positions_ptr;
float* velocities = *velocities_ptr;
//skip second line
fscanf(xin, "%*[^\n]\n", NULL);
fscanf(vin, "%*[^\n]\n", NULL);
for(i=0; i<*npart; i++) {
fscanf(xin, "atom %f %f %f\n",&positions[3*i],&positions[3*i+1],&positions[3*i+2]);
fscanf(vin, "atom %f %f %f\n",&velocities[3*i], &velocities[3*i+1], &velocities[3*i+2]);
//fprintf(stderr,"i: %d\n",i);
}
}
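//initPreset: read positions from an xyz file and generate fresh random velocities rescaled to the target temperature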
void initPreset(float temp, int *npart, float** positions_ptr, float** velocities_ptr, float** f_ptr, FILE* xin) {
int i,j;
float* sumv = (float*)calloc(3,sizeof(float));
float sumv2 = 0;
srand(421);
fscanf(xin,"%d\n",npart); //read number of particles
*positions_ptr = (float*) calloc(3*(*npart),sizeof(float));
*velocities_ptr = (float*) calloc(3*(*npart),sizeof(float));
*f_ptr = (float*) calloc(3*(*npart),sizeof(float));
float* positions = *positions_ptr;
float* velocities = *velocities_ptr;
//skip second line
fscanf(xin, "%*[^\n]\n", NULL);
for(i=0; i<*npart; i++)
fscanf(xin, "atom %f %f %f\n",&positions[3*i],&positions[3*i+1],&positions[3*i+2]);
for (i = 0; i < *npart; i++) {
for(j = 0; j < 3; j++){
velocities[i*3 + j] = (rand()/(float)RAND_MAX - 0.5);
sumv[j] += velocities[i*3+j];
sumv2 += velocities[i*3+j]*velocities[i*3+j];
}
}
sumv[0] /= *npart;
sumv[1] /= *npart;
sumv[2] /= *npart;
sumv2 /= *npart;
float vCMS2 = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
fprintf(stdout,"Velocity Center of mass: %f frame 0\n",vCMS2);
float fs = sqrt(3*temp/sumv2);
for (i = 0; i < *npart; i++) {
for(j = 0; j < 3; j++) {
velocities[i*3 + j] -= sumv[j];
velocities[i*3 + j] *= fs;
}
}
free(sumv);
}
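//Gaussian random number via the Marsaglia polar method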
float gauss(float sigma, float mean) {
float r =2.0;
float v1,v2;
while( r >= 1) {
v1=2*(rand()/(float)RAND_MAX)-1;
v2=2*(rand()/(float)RAND_MAX)-1;
r=v1*v1+v2*v2;
}
float l= v1*sqrt(-2*log(r)/r);
return mean+sigma*l;
}
#ifdef CUDA
void errorHandler (cudaError_t error, int line){
if(error != cudaSuccess)
{
// print the CUDA error message and exit
fprintf(stderr,"CUDA error: %s in line number %d\n", cudaGetErrorString(error),line);
exit(-1);
}
}
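//one thread per particle: Lennard-Jones force with the minimum-image convention; the potential energy and the
//surface-tension virial term are accumulated only for j > threadId so that every pair is counted once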
__global__ void particleForce(float* f, const float* positions, const int npart, const float boxX, const float boxY, const float boxZ, const float cutoff2, const int n, const float ecut, float* energies,float* SurfaceTensions) {
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
if(threadId >= npart)
return;
int j;
float rx;
float ry;
float rz;
float r2;
float r2i;
float r6i;
float ff; //lennard-jones potential
f[3*threadId] = 0;
f[3*threadId+1] =0;
f[3*threadId+2] =0;
energies[threadId] =0;
SurfaceTensions[threadId] =0;
for(j=0; j<npart; j++) {
if(j==threadId) continue;
rx = positions[3*threadId] - positions[3*j];
ry = positions[3*threadId+1] - positions[3*j+1];
rz = positions[3*threadId+2] - positions[3*j+2];
rx -= boxX* round(rx/boxX);
ry -= boxY* round(ry/boxY);
rz -= boxZ* round(rz/boxZ);
r2 = rx*rx + ry*ry + rz*rz;
		if(r2 < 0.1) printf("Not good! i %d j %d squared distance %f frame %d\n",threadId,j,r2,n);
if(r2 < cutoff2) {
r2i = 1/r2;
r6i = r2i*r2i*r2i;
ff = 48*r2i*r6i*(r6i - 0.5);
f[3*threadId] += ff*rx;
f[3*threadId+1] += ff*ry;
f[3*threadId+2] += ff*rz;
if(j>threadId) {
//ff = abs(ff);
energies[threadId] += 4*r6i*(r6i-1) - ecut;
SurfaceTensions[threadId] += ff * (rx*rx + ry*ry - 2 * rz * rz);
}
}
}
}
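//host wrapper: copy positions to the device, launch particleForce, copy back the forces and the per-particle energy and
//surface-tension contributions, and reduce them on the host (the surface tension is normalised by 4*Lx*Ly)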
void calcForce(float *st,float* en, float* f, const float* positions, const int npart, const float *box, const float cutoff2, const int n, const float ecut) {
float *devPtr_forces;
float *devPtr_positions;
float *devPtr_energies;
float *devPtr_SurfaceTensions;
int i;
float *energies = (float*)calloc(npart,sizeof(float));
float *SurfaceTensions = (float*)calloc(npart,sizeof(float));
errorHandler(cudaMalloc((void**)&devPtr_forces, 3*npart* sizeof(float)),__LINE__);
errorHandler(cudaMalloc((void**)&devPtr_positions, 3*npart* sizeof(float)),__LINE__);
errorHandler(cudaMalloc((void**)&devPtr_energies, npart*sizeof(float)),__LINE__);
errorHandler(cudaMalloc((void**)&devPtr_SurfaceTensions, npart*sizeof(float)),__LINE__);
//errorHandler(cudaMemcpy(devPtr_forces, f, 3*npart * sizeof(float), cudaMemcpyHostToDevice),__LINE__);
errorHandler(cudaMemcpy(devPtr_positions, positions, 3*npart * sizeof(float), cudaMemcpyHostToDevice),__LINE__);
int threadsPerBlock = 512;
int blocks = npart/threadsPerBlock + 1;
//fprintf(stderr,"starting GPU calc\n");
particleForce<<<blocks, threadsPerBlock>>>(devPtr_forces, devPtr_positions, npart, box[0], box[1], box[2], cutoff2, n,ecut, devPtr_energies,devPtr_SurfaceTensions);
errorHandler( cudaPeekAtLastError(),__LINE__);
errorHandler(cudaMemcpy(f, devPtr_forces, 3*npart * sizeof(float), cudaMemcpyDeviceToHost),__LINE__);
errorHandler(cudaMemcpy(energies,devPtr_energies, npart*sizeof(float), cudaMemcpyDeviceToHost),__LINE__);
errorHandler(cudaMemcpy(SurfaceTensions,devPtr_SurfaceTensions, npart*sizeof(float), cudaMemcpyDeviceToHost),__LINE__);
errorHandler(cudaFree(devPtr_forces),__LINE__);
errorHandler(cudaFree(devPtr_positions),__LINE__);
errorHandler(cudaFree(devPtr_energies),__LINE__);
errorHandler(cudaFree(devPtr_SurfaceTensions),__LINE__);
*st = 0;
*en =0;
for(i=0; i<npart; i++) {
*en += energies[i];
*st += SurfaceTensions[i];
}
*st /= 4*box[0]*box[1];
//*st = abs(*st);
free(energies);
free(SurfaceTensions);
}
/*__global__ void integrateParticle1(const int npart, const float* box, const float* f, float* positions, float* velocities, const float dt) {
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
if(threadId >= npart)
return;
int j;
for( j=0; j<3; j++) {
positions[3*threadId+j] += dt* velocities[3*threadId+j] +dt*dt* f[3*threadId+j]/2;
//apply periodic boundary conditions
if(positions[3*threadId+j] > box[j]/2 || positions[3*threadId+j] < -box[j]/2) {
positions[3*threadId+j] -= round(positions[3*threadId+j]/box[j])*box[j];
}
velocities[3*threadId+j] += dt*f[3*threadId+j]/2;
}
}
__global__ void integrateParticle2(const int npart, const float* f, float* velocities, const float dt) {
int threadId = blockIdx.x*blockDim.x + threadIdx.x;
if(threadId >= npart)
return;
int j;
for( j=0; j<3; j++) {
velocities[3*threadId+j] += dt*f[3*threadId+j]/2;
}
}
void integrateVelVerlet(const float en, const float* box, const int part, const float* f, const int npart, float* positions, float* velocities, const float dt, const float temp_target, const float nu, const int frame) {
int i,j;
if(part ==1) {
float *devPtr_forces;
float *devPtr_positions;
float *devPtr_velocities;
float *devPtr_box;
errorHandler(cudaMalloc((void**)&devPtr_forces, 3*npart* sizeof(float)),__LINE__);
errorHandler(cudaMalloc((void**)&devPtr_positions, 3*npart* sizeof(float)),__LINE__);
errorHandler(cudaMalloc((void**)&devPtr_velocities, 3*npart*sizeof(float)),__LINE__);
errorHandler(cudaMalloc((void**)&devPtr_box, 3*sizeof(float)),__LINE__);
errorHandler(cudaMemcpy(devPtr_forces, f, 3*npart * sizeof(float), cudaMemcpyHostToDevice),__LINE__);
errorHandler(cudaMemcpy(devPtr_positions, positions, 3*npart * sizeof(float), cudaMemcpyHostToDevice),__LINE__);
errorHandler(cudaMemcpy(devPtr_velocities, velocities, 3*npart *sizeof(float), cudaMemcpyHostToDevice),__LINE__);
errorHandler(cudaMemcpy(devPtr_box, box, 3*sizeof(float), cudaMemcpyHostToDevice),__LINE__);
int threadsPerBlock = 512;
int blocks = npart/threadsPerBlock + 1;
//fprintf(stderr,"starting GPU calc\n");
integrateParticle1<<<blocks, threadsPerBlock>>>(npart, devPtr_box, devPtr_forces, devPtr_positions, devPtr_velocities, dt);
errorHandler( cudaPeekAtLastError(),__LINE__);
errorHandler(cudaMemcpy(positions, devPtr_positions, 3*npart * sizeof(float), cudaMemcpyDeviceToHost),__LINE__);
errorHandler(cudaMemcpy(velocities, devPtr_velocities, 3*npart * sizeof(float), cudaMemcpyDeviceToHost),__LINE__);
errorHandler(cudaFree(devPtr_forces),__LINE__);
errorHandler(cudaFree(devPtr_positions),__LINE__);
errorHandler(cudaFree(devPtr_velocities),__LINE__);
errorHandler(cudaFree(devPtr_box),__LINE__);
}
if(part ==2) {
float *devPtr_forces;
float *devPtr_velocities;
errorHandler(cudaMalloc((void**)&devPtr_forces, 3*npart* sizeof(float)),__LINE__);
errorHandler(cudaMalloc((void**)&devPtr_velocities, 3*npart*sizeof(float)),__LINE__);
errorHandler(cudaMemcpy(devPtr_forces, f, 3*npart * sizeof(float), cudaMemcpyHostToDevice),__LINE__);
errorHandler(cudaMemcpy(devPtr_velocities, velocities, 3*npart *sizeof(float), cudaMemcpyHostToDevice),__LINE__);
int threadsPerBlock = 512;
int blocks = npart/threadsPerBlock + 1;
//fprintf(stderr,"starting GPU calc\n");
integrateParticle2<<<blocks, threadsPerBlock>>>(npart, devPtr_forces, devPtr_velocities, dt);
errorHandler( cudaPeekAtLastError(),__LINE__);
errorHandler(cudaMemcpy(velocities, devPtr_velocities, 3*npart * sizeof(float), cudaMemcpyDeviceToHost),__LINE__);
errorHandler(cudaFree(devPtr_forces),__LINE__);
errorHandler(cudaFree(devPtr_velocities),__LINE__);
//temp_current /= 3*npart;
float sigma = sqrt(temp_target);
//float* sumv = (float*)calloc(3,sizeof(float));
if(fabs(nu) > 0.001) {
for(i=0; i<npart; i++) {
if(rand()/(float)RAND_MAX < nu*dt) {
for(j=0; j<3; j++) {
velocities[3*i+j] = gauss(sigma,0);
}
}
//sumv[0] += velocities[3*i];
//sumv[1] += velocities[3*i+1];
//sumv[2] += velocities[3*i+2];
}
}
//sumv[0] /=npart;
//sumv[1] /=npart;
//sumv[2] /=npart;
//float sumvSquared = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
//float etot = (en + 0.5*temp_current)/npart;
//fprintf(stdout,"total Vel CMS: %f VelX: %8.5f VelY %8.5f VelZ %8.5f Temp: %f Energy: %f frame: %d\n",sumvSquared, sumv[0], sumv[1], sumv[2], temp_current, etot, frame);
//fprintf(stdout,"Temp: %f Energy: %f frame: %d\n", temp_current, etot, frame);
//fprintf(stdout,"Velocity Center of mass: %f frame: %d\n",sumv2, frame);
}
}*/
#else
void calcForce(float *st, float* en, float* f, const float* positions, const int npart, const float *box, const float cutoff2, const int n, const float ecut) {
	int i,j;
	*en = 0;
	*st = 0; //surface-tension accumulator, matching the interface used at the call sites and in the CUDA branch
for(i=0; i <3*npart; i++) {
f[i] = 0;
}
float rx;
float ry;
float rz;
float r2;
float r2i;
float r6i;
float ff; //lennard-jones potential
for(i=0; i < npart-1; i++) {
for(j=i+1; j<npart; j++) {
rx = positions[3*i] - positions[3*j];
ry = positions[3*i+1] - positions[3*j+1];
rz = positions[3*i+2] - positions[3*j+2];
rx -= box[0]* round(rx/box[0]);
ry -= box[1]* round(ry/box[1]);
rz -= box[2]* round(rz/box[2]);
r2 = rx*rx + ry*ry + rz*rz;
			if(r2 < 0.1) fprintf(stderr,"Not good! i %d j %d squared distance %f frame %d\n",i,j,r2,n);
if(r2 < cutoff2) {
r2i = 1/r2;
r6i = r2i*r2i*r2i;
ff = 48*r2i*r6i*(r6i - 0.5);
f[3*i] += ff*rx;
f[3*j] -= ff*rx;
f[3*i+1] += ff*ry;
f[3*j+1] -= ff*ry;
f[3*i+2] += ff*rz;
f[3*j+2] -= ff*rz;
				*en += 4*r6i*(r6i-1) - ecut;
				*st += ff * (rx*rx + ry*ry - 2 * rz * rz); //pairwise surface-tension virial term, as in the GPU kernel
}
}
}
	*st /= 4*box[0]*box[1]; //same normalisation as the GPU calcForce
}
/*void integrateVelVerlet(const float en, const float* box, const int part, const float* f, const int npart, float* positions, float* velocities, const float dt, const float temp_target, const float nu, const int frame) {
int i,j;
if(part ==1) {
for(i=0; i<npart; i++)
for(j=0; j<3; j++) {
positions[3*i+j] += dt* velocities[3*i+j] +dt*dt* f[3*i+j]/2; //update position
//apply periodic boundary conditions
if(positions[3*i+j] > box[j]/2 || positions[3*i+j] < -box[j]/2) {
//float pposition = positions[3*i+j];
//positions[3*i+j] -= floor(positions[3*i+j]/box[j])*box[j];
positions[3*i+j] -= round(positions[3*i+j]/box[j])*box[j];
//fprintf(stdout,"Out of box positive particle %d in frame %d. Calculated position %f, corrected position %f\n",i,frame,pposition, positions[3*i+j]);
}
velocities[3*i+j] += dt*f[3*i+j]/2;
}
}
if(part ==2) {
float temp_current =0;
for(i=0; i<npart; i++) {
for(j=0; j<3; j++){
velocities[3*i+j] = velocities[3*i+j] +dt* f[3*i+j]/2;
temp_current += velocities[3*i+j] *velocities[3*i+j];
}
}
temp_current /= 3*npart;
float sigma = sqrt(temp_target);
//float* sumv = (float*)calloc(3,sizeof(float));
if(fabs(nu) > 0.001) {
for(i=0; i<npart; i++) {
if(rand()/(float)RAND_MAX < nu*dt) {
for(j=0; j<3; j++) {
velocities[3*i+j] = gauss(sigma,0);
}
}
//sumv[0] += velocities[3*i];
//sumv[1] += velocities[3*i+1];
//sumv[2] += velocities[3*i+2];
}
}
//sumv[0] /=npart;
//sumv[1] /=npart;
//sumv[2] /=npart;
//float sumvSquared = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
float etot = (en + 0.5*temp_current)/npart;
//fprintf(stdout,"total Vel CMS: %f VelX: %8.5f VelY %8.5f VelZ %8.5f Temp: %f Energy: %f frame: %d\n",sumvSquared, sumv[0], sumv[1], sumv[2], temp_current, etot, frame);
fprintf(stdout,"Temp: %f Energy: %f frame: %d\n", temp_current, etot, frame);
//fprintf(stdout,"Velocity Center of mass: %f frame: %d\n",sumv2, frame);
}
}*/
#endif
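//two-stage velocity-Verlet with an Andersen-style thermostat: part 1 = position update plus first half-kick,
//part 2 = second half-kick, temperature measurement and stochastic velocity resampling with probability nu*dt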
void integrateVelVerlet(const int sampleStep, const float surfaceTension, const float en, const float* box, const int part, const float* f, const int npart, float* positions, float* velocities, const float dt, const float temp_target, const float nu, const int frame) {
int i,j;
if(part ==1) {
for(i=0; i<npart; i++)
for(j=0; j<3; j++) {
positions[3*i+j] += dt* velocities[3*i+j] +dt*dt* f[3*i+j]/2; //update position
//apply periodic boundary conditions
if(positions[3*i+j] > box[j]/2 || positions[3*i+j] < -box[j]/2) {
//float pposition = positions[3*i+j];
//positions[3*i+j] -= floor(positions[3*i+j]/box[j])*box[j];
positions[3*i+j] -= round(positions[3*i+j]/box[j])*box[j];
//fprintf(stdout,"Out of box positive particle %d in frame %d. Calculated position %f, corrected position %f\n",i,frame,pposition, positions[3*i+j]);
}
velocities[3*i+j] += dt*f[3*i+j]/2;
}
}
if(part ==2) {
float temp_current =0;
for(i=0; i<npart; i++) {
for(j=0; j<3; j++){
velocities[3*i+j] = velocities[3*i+j] +dt* f[3*i+j]/2;
temp_current += velocities[3*i+j] *velocities[3*i+j];
}
}
temp_current /= 3*npart;
float sigma = sqrt(temp_target);
//float* sumv = (float*)calloc(3,sizeof(float));
if(fabs(nu) > 0.001) {
for(i=0; i<npart; i++) {
if(rand()/(float)RAND_MAX < nu*dt) {
for(j=0; j<3; j++) {
velocities[3*i+j] = gauss(sigma,0);
}
}
//sumv[0] += velocities[3*i];
//sumv[1] += velocities[3*i+1];
//sumv[2] += velocities[3*i+2];
}
}
//sumv[0] /=npart;
//sumv[1] /=npart;
//sumv[2] /=npart;
//float sumvSquared = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
float etot = (en + 0.5*temp_current)/npart;
//fprintf(stdout,"total Vel CMS: %f VelX: %8.5f VelY %8.5f VelZ %8.5f Temp: %f Energy: %f frame: %d\n",sumvSquared, sumv[0], sumv[1], sumv[2], temp_current, etot, frame);
if(!(frame%sampleStep))
fprintf(stdout,"Temp: %f Energy: %f Surface Tension: %8.5f frame: %d\n", temp_current, etot,surfaceTension, frame);
//fprintf(stdout,"Velocity Center of mass: %f frame: %d\n",sumv2, frame);
}
}
void integrateVerlet(const float *box, const float* f, const float en, const int npart, float* positions, float* ppositions, float* velocities, const float dt, const int frame) {
int i,j;
float* sumv = (float*) calloc(3,sizeof(float));
float sumv2 = 0;
float xx = 0;
for(i=0; i<npart; i++) {
for(j=0; j<3; j++) {
xx = 2*positions[3*i+j] - ppositions[3*i+j] + dt*dt*f[3*i+j];
velocities[3*i+j] = (xx - ppositions[3*i+j])/(2*dt);
sumv[j] += velocities[3*i+j];
sumv2 += velocities[3*i+j]*velocities[3*i+j];
ppositions[3*i+j] = positions[3*i+j];
positions[3*i+j] = xx;
}
}
sumv[0] /=npart;
sumv[1] /=npart;
sumv[2] /=npart;
float sumvSquared = sumv[0]*sumv[0] + sumv[1]*sumv[1] + sumv[2]*sumv[2];
float temp = sumv2/(3*npart);
float etot = (en + 0.5*sumv2)/npart;
fprintf(stdout,"Vel CMS: %f Temp: %f Energy: %f frame: %d\n",sumvSquared, temp, etot, frame);
free(sumv);
}
//void sample(FILE* xres, FILE* vres, FILE* fres, int npart, float* positions, float* velocities, float* f) {
void sample(FILE* xres, FILE* vres, int npart, float* positions, float* velocities) {
int i;
for(i=0; i<npart; i++) {
//fprintf(xres, " %d %8.8f %8.8f %8.8f \n", i,positions[3*i],positions[3*i+1],positions[3*i+2]);
fprintf(xres, " atom %8.8f %8.8f %8.8f \n", positions[3*i],positions[3*i+1],positions[3*i+2]);
}
for(i=0; i<npart; i++) {
//fprintf(vres, " %d %8.8f %8.8f %8.8f \n", i,velocities[3*i],velocities[3*i+1],velocities[3*i+2]);
fprintf(vres, " atom %8.8f %8.8f %8.8f \n",velocities[3*i],velocities[3*i+1],velocities[3*i+2]);
}
}
int main(int argc, char* argv[])
{
int npart,sampleStep;
float temp,dt,tmax,cutoff, cutoff2,nu;
float box[3];
//cutoff for lennard jones
char* inXpath;
char* inVpath;
int thermostat =0;
int continued = 0;
int preset =0;
if(argc==10) {
sscanf(argv[1],"%d",&npart); //units: https://en.wikipedia.org/wiki/Lennard-Jones_potential#Dimensionless_.28reduced.29_units
sscanf(argv[2],"%f",&temp);
sscanf(argv[3],"%f",&dt);
sscanf(argv[4],"%f",&tmax);
sscanf(argv[5],"%d",&sampleStep);
sscanf(argv[6],"%f",&box[0]);
sscanf(argv[7],"%f",&box[1]);
sscanf(argv[8],"%f",&box[2]);
sscanf(argv[9],"%f",&cutoff);
//xpath = argv[9];
//vpath = argv[10];
//endXpath = argv[10];
//endVpath = argv[11];
printf("Input: Particles: %d Temperature: %f timestep: %f Maxtime: %f Samplestep: %d BoxX: %f BoxY: %f BoxZ: %f cutoff: %f\n",npart,temp,dt,tmax, sampleStep, box[0],box[1],box[2],cutoff);
} else if(argc==11) {
sscanf(argv[1],"%d",&npart); //units: https://en.wikipedia.org/wiki/Lennard-Jones_potential#Dimensionless_.28reduced.29_units
sscanf(argv[2],"%f",&temp);
sscanf(argv[3],"%f",&dt);
sscanf(argv[4],"%f",&tmax);
sscanf(argv[5],"%d",&sampleStep);
sscanf(argv[6],"%f",&box[0]);
sscanf(argv[7],"%f",&box[1]);
sscanf(argv[8],"%f",&box[2]);
sscanf(argv[9],"%f",&cutoff);
sscanf(argv[10],"%f",&nu);
thermostat =1;
printf("Input: Particles: %d Temperature: %f timestep: %f Maxtime: %f Samplestep: %d BoxX: %f BoxY: %f BoxZ: %f cutoff: %f collision-probability: %f run with thermostat\n",npart,temp,dt,tmax, sampleStep, box[0],box[1],box[2],cutoff,nu);
} else if(argc==12) {
//https://en.wikipedia.org/wiki/Lennard-Jones_potential#Dimensionless_.28reduced.29_units
sscanf(argv[1],"%f",&temp);
sscanf(argv[2],"%f",&dt);
sscanf(argv[3],"%f",&tmax);
sscanf(argv[4],"%d",&sampleStep);
sscanf(argv[5],"%f",&box[0]);
sscanf(argv[6],"%f",&box[1]);
sscanf(argv[7],"%f",&box[2]);
sscanf(argv[8],"%f",&cutoff);
sscanf(argv[9],"%f",&nu);
inXpath = argv[10];
thermostat =1;
preset =1;
printf("Input: Temperature: %f timestep: %f Maxtime: %f Samplestep: %d BoxX: %f BoxY: %f BoxZ: %f cutoff: %f collision-probability: %f continued run with thermostat reading path: %s\n",temp,dt,tmax, sampleStep, box[0],box[1],box[2],cutoff, nu, inXpath);
} else if(argc==13) {
//sscanf(argv[1],"%d",&npart); //units: https://en.wikipedia.org/wiki/Lennard-Jones_potential#Dimensionless_.28reduced.29_units
sscanf(argv[1],"%f",&temp);
sscanf(argv[2],"%f",&dt);
sscanf(argv[3],"%f",&tmax);
sscanf(argv[4],"%d",&sampleStep);
sscanf(argv[5],"%f",&box[0]);
sscanf(argv[6],"%f",&box[1]);
sscanf(argv[7],"%f",&box[2]);
sscanf(argv[8],"%f",&cutoff);
sscanf(argv[9],"%f",&nu);
inXpath = argv[10];
inVpath = argv[11];
thermostat =1;
continued =1;
printf("Input: Temperature: %f timestep: %f Maxtime: %f Samplestep: %d BoxX: %f BoxY: %f BoxZ: %f cutoff: %f collision-probability: %f continued run with thermostat reading path: %s reading path: %s\n",temp,dt,tmax, sampleStep, box[0],box[1],box[2],cutoff, nu, inXpath, inVpath);
} else {
printf("Syntax: ./MD (*not in continued run* <number of particles>) <temperature> <dt> <tmax> <sampleStep> <boxX> <boxY> <boxZ> <cutoffDistance> (*optional* <collision probability> <Readpath pos> <Readpath vel>/<preset y/n> <Continued y/n>)\n");
exit(-1);
}
cutoff2 = cutoff*cutoff;
FILE* xres = fopen("./pos.xyz","w");
FILE* vres = fopen("./vel.xyz","w");
if(!xres || !vres) {
printf("File not found!");
exit(-1);
}
float en, st;
float *positions =NULL;
float *ppositions = NULL;
float *velocities = NULL;
float *f = NULL;
if(continued) {
FILE* xin = fopen(inXpath,"r");
FILE* vin = fopen(inVpath,"r");
if(!xin || !vin) {
printf("File not found!");
exit(-1);
}
initContinued(&npart, &positions, &velocities, &f,xin,vin);
//fprintf(stderr,"pos %f %f %f\n", positions[0],positions[1],positions[2]);
fclose(xin);
fclose(vin);
} else if(preset) {
FILE* xin = fopen(inXpath,"r");
if(!xin) {
printf("File not found!");
exit(-1);
}
initPreset(temp, &npart, &positions, &velocities, &f,xin);
//fprintf(stderr,"pos %f %f %f\n", positions[0],positions[1],positions[2]);
fclose(xin);
} else {
initStart(npart, temp, &positions, &ppositions, &velocities, &f,dt,box);
}
fprintf(xres,"%d\n",npart);
fprintf(xres,"generated by my MD simulation\n");
fprintf(vres,"%d\n",npart);
fprintf(vres,"generated by my MD simulation\n");
float t = 0;
float cutoffi = 1/cutoff2;
float cutoff6i = cutoffi*cutoffi*cutoffi;
float ecut = 4*cutoff6i*(cutoff6i-1);
//void integrateVelVerlet(const int sampleStep, const float surfaceTension, const float en, const float* box, const int part, const float* f, const int npart, float* positions, float* velocities, const float dt, const float temp_target, const float nu, const int frame)
//calcForce(float *surfaceTension,float* en, float* f, const float* positions, const int npart, const float *box, const float cutoff2, const int n, const float ecut)
if(thermostat) calcForce(&st,&en,f, positions, npart, box, cutoff2,(int)(t/dt), ecut);
sample(xres, vres, npart, positions, velocities);
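	//main time loop: plain Verlet without a thermostat, otherwise velocity-Verlet wrapped around a force/surface-tension evaluation,
	//with trajectory samples and posEnd/velEnd checkpoints every sampleStep frames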
while(t < tmax) {
int frame = (int)(t/dt);
if(!thermostat) {
calcForce(&st,&en, f, positions, npart, box, cutoff2,frame, ecut);
integrateVerlet(box, f, en, npart, positions, ppositions, velocities, dt,frame);
t += dt;
sample(xres, vres, npart, positions, velocities);
fprintf(stderr,"Stage %% %f\r",(double)t/tmax*100.0);
} else {
integrateVelVerlet(sampleStep, st ,en,box, 1,f, npart, positions, velocities, dt, temp,nu,frame);
calcForce(&st, &en, f, positions, npart, box, cutoff2,frame, ecut);
integrateVelVerlet(sampleStep,st, en,box, 2,f, npart, positions, velocities, dt, temp,nu,frame);
t += dt;
if(!(frame%sampleStep)) {
sample(xres, vres, npart, positions, velocities);
FILE* xend = fopen("./posEnd.xyz","w");
FILE* vend = fopen("./velEnd.xyz","w");
if(!xend || !vend) {
printf("File not found!");
exit(-1);
}
fprintf(xend,"%d\n",npart);
fprintf(xend,"generated by my MD simulation\n");
fprintf(vend,"%d\n",npart);
fprintf(vend,"generated by my MD simulation\n");
sample(xend, vend, npart, positions, velocities);
fclose(xend);
fclose(vend);
}
fprintf(stderr,"Stage %% %f\r",(double)t/tmax*100.0);
}
}
fclose(xres);
fclose(vres);
free(f);
free(positions);
free(ppositions);
free(velocities);
}
|
5a61a453809b4c4589619919da33c6c0232f448f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kring_rotation_Ay.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
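// note: this generated harness passes XSIZE*YSIZE as a byte count to hipMalloc, so each buffer holds
// XSIZE*YSIZE bytes rather than XSIZE*YSIZE doubles (kept as generated)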
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
double *z = NULL;
hipMalloc(&z, XSIZE*YSIZE);
double xMax = 1;
double yMax = 1;
double zMax = 1;
double omegaX = 1;
double omegaY = 1;
double omegaZ = 1;
double omega = 1;
double fudge = 1;
double *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kring_rotation_Ay), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,xMax,yMax,zMax,omegaX,omegaY,omegaZ,omega,fudge,A);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kring_rotation_Ay), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,xMax,yMax,zMax,omegaX,omegaY,omegaZ,omega,fudge,A);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kring_rotation_Ay), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,z,xMax,yMax,zMax,omegaX,omegaY,omegaZ,omega,fudge,A);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5a61a453809b4c4589619919da33c6c0232f448f.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kring_rotation_Ay.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
double *z = NULL;
cudaMalloc(&z, XSIZE*YSIZE);
double xMax = 1;
double yMax = 1;
double zMax = 1;
double omegaX = 1;
double omegaY = 1;
double omegaZ = 1;
double omega = 1;
double fudge = 1;
double *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kring_rotation_Ay<<<gridBlock,threadBlock>>>(x,y,z,xMax,yMax,zMax,omegaX,omegaY,omegaZ,omega,fudge,A);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kring_rotation_Ay<<<gridBlock,threadBlock>>>(x,y,z,xMax,yMax,zMax,omegaX,omegaY,omegaZ,omega,fudge,A);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kring_rotation_Ay<<<gridBlock,threadBlock>>>(x,y,z,xMax,yMax,zMax,omegaX,omegaY,omegaZ,omega,fudge,A);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a44f7db4671199ff9700a645ccf07f1042bae23f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "core/gpu/euler_2d.cuh"
#include "core/gpu/reduce.cuh"
#include "core/grid/grid.h"
#include <hip/hip_runtime.h>
#include <algorithm>
template<>
inline CPU_GPU float max_speed (float v_c, float v_n, float u_c, float u_n)
{
const float zero = 0.0f;
const float splus = fmaxf (zero, fmaxf (u_c + v_c, u_n + v_n));
const float sminus = fminf (zero, fminf (u_c - v_c, u_n - v_n));
return fmaxf (splus, -sminus);
}
template <class float_type, int warps_count>
__global__ void euler_2d_calculate_dt_gpu_kernel (
float_type gamma,
const grid_topology topology,
const grid_geometry geometry,
float_type *workspace,
const float_type *p_rho,
const float_type *p_u,
const float_type *p_v,
const float_type *p_p)
{
const unsigned int first_cell_id = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
float_type min_len = std::numeric_limits<float_type>::max ();
float_type max_speed = std::numeric_limits<float_type>::min ();
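  // note: fmaxf/fabsf below are single-precision even when float_type is double; numeric_limits<>::min() is the
  // smallest positive value, which is acceptable here because the tracked speeds are non-negative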
for (unsigned int cell_id = first_cell_id; cell_id < topology.get_cells_count (); cell_id += stride)
{
const float_type rho = p_rho[cell_id];
const float_type p = p_p[cell_id];
const float_type a = speed_of_sound_in_gas (gamma, p, rho);
const float_type u = p_u[cell_id];
const float_type v = p_v[cell_id];
max_speed = fmaxf (max_speed, fmaxf (fabsf (u + a), fabsf (u - a)));
max_speed = fmaxf (max_speed, fmaxf (fabsf (v + a), fabsf (v - a)));
for (unsigned int edge_id = 0; edge_id < topology.get_edges_count (cell_id); edge_id++)
{
const float_type edge_len = geometry.get_edge_area (cell_id, edge_id);
if (edge_len < min_len)
min_len = edge_len;
}
}
min_len = block_reduce <float_type, reduce_operation::min, warps_count> (min_len);
max_speed = block_reduce <float_type, reduce_operation::max, warps_count> (max_speed);
if (threadIdx.x == 0)
{
atomicMin (workspace + 0, min_len);
atomicMax (workspace + 1, max_speed);
}
}
template <class float_type>
float_type euler_2d_calculate_dt_gpu (
float_type gamma,
float_type cfl,
const grid_topology &topology,
const grid_geometry &geometry,
float_type *workspace,
const float_type *p_rho,
const float_type *p_u,
const float_type *p_v,
const float_type *p_p)
{
float_type cpu_workspace_copy[2];
float_type &min_len = cpu_workspace_copy[0];
float_type &max_speed = cpu_workspace_copy[1];
min_len = std::numeric_limits<float_type>::max ();
max_speed = std::numeric_limits<float_type>::min ();
hipMemcpy (workspace, cpu_workspace_copy, 2 * sizeof (float_type), hipMemcpyHostToDevice);
constexpr int warps_per_block = 32;
constexpr int warp_size = 32;
constexpr int threads_per_block = warps_per_block * warp_size;
const int blocks = std::min ((topology.get_cells_count () + threads_per_block - 1) / threads_per_block, 1024u);
hipLaunchKernelGGL(( euler_2d_calculate_dt_gpu_kernel<float_type, warp_size>) , dim3(blocks), dim3(threads_per_block), 0, 0,
gamma, topology, geometry, workspace, p_rho, p_u, p_v, p_p);
hipMemcpy (cpu_workspace_copy, workspace, 2 * sizeof (float_type), hipMemcpyDeviceToHost);
float_type new_dt = cfl * min_len / max_speed;
return new_dt;
}
template <class float_type>
__global__ void euler_2d_calculate_next_time_step_gpu_kernel (
float_type dt,
float_type gamma,
const grid_topology topology,
const grid_geometry geometry,
const float_type *p_rho,
float_type *p_rho_next,
const float_type *p_u,
float_type *p_u_next,
const float_type *p_v,
float_type *p_v_next,
const float_type *p_p,
float_type *p_p_next)
{
const unsigned int cell_id = blockIdx.x * blockDim.x + threadIdx.x;
if (cell_id < topology.get_cells_count ())
euler_2d_calculate_next_cell_values (
cell_id, dt, gamma, topology, geometry,
p_rho, p_rho_next, p_u, p_u_next, p_v, p_v_next, p_p, p_p_next);
}
template <class float_type>
void euler_2d_calculate_next_time_step_gpu (
float_type dt,
float_type gamma,
const grid_topology &topology,
const grid_geometry &geometry,
const float_type *p_rho,
float_type *p_rho_next,
const float_type *p_u,
float_type *p_u_next,
const float_type *p_v,
float_type *p_v_next,
const float_type *p_p,
float_type *p_p_next)
{
constexpr int threads_per_block = 1024;
const unsigned int blocks = (topology.get_cells_count () + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( euler_2d_calculate_next_time_step_gpu_kernel) , dim3(blocks), dim3(threads_per_block), 0, 0,
dt, gamma, topology, geometry,
p_rho, p_rho_next, p_u, p_u_next, p_v, p_v_next, p_p, p_p_next);
}
#define GEN_EULER_2D_INSTANCE_FOR(type) \
template type euler_2d_calculate_dt_gpu <type>( \
type gamma, type cfl, \
const grid_topology &, const grid_geometry &, \
type *workspace, const type *p_rho, \
const type *p_u, const type *p_v, const type *p_p); \
template void euler_2d_calculate_next_time_step_gpu ( \
type dt, type gamma, \
const grid_topology &, const grid_geometry &, \
const type *p_rho, type *p_rho_next, \
const type *p_u, type *p_u_next, const type *p_v, \
type *p_v_next, const type *p_p, type *p_p_next);
GEN_EULER_2D_INSTANCE_FOR (float)
GEN_EULER_2D_INSTANCE_FOR (double)
#undef GEN_EULER_2D_INTERFACE_INSTANCE_FOR
|
a44f7db4671199ff9700a645ccf07f1042bae23f.cu
|
#include "core/gpu/euler_2d.cuh"
#include "core/gpu/reduce.cuh"
#include "core/grid/grid.h"
#include <cuda_runtime.h>
#include <algorithm>
template<>
inline CPU_GPU float max_speed (float v_c, float v_n, float u_c, float u_n)
{
const float zero = 0.0f;
const float splus = fmaxf (zero, fmaxf (u_c + v_c, u_n + v_n));
const float sminus = fminf (zero, fminf (u_c - v_c, u_n - v_n));
return fmaxf (splus, -sminus);
}
template <class float_type, int warps_count>
__global__ void euler_2d_calculate_dt_gpu_kernel (
float_type gamma,
const grid_topology topology,
const grid_geometry geometry,
float_type *workspace,
const float_type *p_rho,
const float_type *p_u,
const float_type *p_v,
const float_type *p_p)
{
const unsigned int first_cell_id = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
float_type min_len = std::numeric_limits<float_type>::max ();
float_type max_speed = std::numeric_limits<float_type>::min ();
for (unsigned int cell_id = first_cell_id; cell_id < topology.get_cells_count (); cell_id += stride)
{
const float_type rho = p_rho[cell_id];
const float_type p = p_p[cell_id];
const float_type a = speed_of_sound_in_gas (gamma, p, rho);
const float_type u = p_u[cell_id];
const float_type v = p_v[cell_id];
max_speed = fmaxf (max_speed, fmaxf (fabsf (u + a), fabsf (u - a)));
max_speed = fmaxf (max_speed, fmaxf (fabsf (v + a), fabsf (v - a)));
for (unsigned int edge_id = 0; edge_id < topology.get_edges_count (cell_id); edge_id++)
{
const float_type edge_len = geometry.get_edge_area (cell_id, edge_id);
if (edge_len < min_len)
min_len = edge_len;
}
}
min_len = block_reduce <float_type, reduce_operation::min, warps_count> (min_len);
max_speed = block_reduce <float_type, reduce_operation::max, warps_count> (max_speed);
if (threadIdx.x == 0)
{
atomicMin (workspace + 0, min_len);
atomicMax (workspace + 1, max_speed);
}
}
template <class float_type>
float_type euler_2d_calculate_dt_gpu (
float_type gamma,
float_type cfl,
const grid_topology &topology,
const grid_geometry &geometry,
float_type *workspace,
const float_type *p_rho,
const float_type *p_u,
const float_type *p_v,
const float_type *p_p)
{
float_type cpu_workspace_copy[2];
float_type &min_len = cpu_workspace_copy[0];
float_type &max_speed = cpu_workspace_copy[1];
min_len = std::numeric_limits<float_type>::max ();
max_speed = std::numeric_limits<float_type>::min ();
cudaMemcpy (workspace, cpu_workspace_copy, 2 * sizeof (float_type), cudaMemcpyHostToDevice);
constexpr int warps_per_block = 32;
constexpr int warp_size = 32;
constexpr int threads_per_block = warps_per_block * warp_size;
const int blocks = std::min ((topology.get_cells_count () + threads_per_block - 1) / threads_per_block, 1024u);
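  // the kernel's warps_count template argument (presumably the number of warps per block used by block_reduce)
  // is given warp_size; with 32 warps of 32 threads per block the two values coincide, so this is numerically correct here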
euler_2d_calculate_dt_gpu_kernel<float_type, warp_size> <<<blocks, threads_per_block>>> (
gamma, topology, geometry, workspace, p_rho, p_u, p_v, p_p);
cudaMemcpy (cpu_workspace_copy, workspace, 2 * sizeof (float_type), cudaMemcpyDeviceToHost);
float_type new_dt = cfl * min_len / max_speed;
return new_dt;
}
template <class float_type>
__global__ void euler_2d_calculate_next_time_step_gpu_kernel (
float_type dt,
float_type gamma,
const grid_topology topology,
const grid_geometry geometry,
const float_type *p_rho,
float_type *p_rho_next,
const float_type *p_u,
float_type *p_u_next,
const float_type *p_v,
float_type *p_v_next,
const float_type *p_p,
float_type *p_p_next)
{
const unsigned int cell_id = blockIdx.x * blockDim.x + threadIdx.x;
if (cell_id < topology.get_cells_count ())
euler_2d_calculate_next_cell_values (
cell_id, dt, gamma, topology, geometry,
p_rho, p_rho_next, p_u, p_u_next, p_v, p_v_next, p_p, p_p_next);
}
template <class float_type>
void euler_2d_calculate_next_time_step_gpu (
float_type dt,
float_type gamma,
const grid_topology &topology,
const grid_geometry &geometry,
const float_type *p_rho,
float_type *p_rho_next,
const float_type *p_u,
float_type *p_u_next,
const float_type *p_v,
float_type *p_v_next,
const float_type *p_p,
float_type *p_p_next)
{
constexpr int threads_per_block = 1024;
const unsigned int blocks = (topology.get_cells_count () + threads_per_block - 1) / threads_per_block;
euler_2d_calculate_next_time_step_gpu_kernel <<<blocks, threads_per_block>>> (
dt, gamma, topology, geometry,
p_rho, p_rho_next, p_u, p_u_next, p_v, p_v_next, p_p, p_p_next);
}
#define GEN_EULER_2D_INSTANCE_FOR(type) \
template type euler_2d_calculate_dt_gpu <type>( \
type gamma, type cfl, \
const grid_topology &, const grid_geometry &, \
type *workspace, const type *p_rho, \
const type *p_u, const type *p_v, const type *p_p); \
template void euler_2d_calculate_next_time_step_gpu ( \
type dt, type gamma, \
const grid_topology &, const grid_geometry &, \
const type *p_rho, type *p_rho_next, \
const type *p_u, type *p_u_next, const type *p_v, \
type *p_v_next, const type *p_p, type *p_p_next);
GEN_EULER_2D_INSTANCE_FOR (float)
GEN_EULER_2D_INSTANCE_FOR (double)
#undef GEN_EULER_2D_INTERFACE_INSTANCE_FOR
|
edge_array_device.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "edge_io_cuda.h"
// index[] points to the begin of dest[]
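// copy the CSR-style adjacency (index[0..N], dest[0..E]), presumably allocated by the Edge_Array base constructor,
// into device memory and then release the host copies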
Edge_Array_Device::Edge_Array_Device( const Edge_List &b ) : Edge_Array(b) {
uint32_t *_index = index ;
uint32_t *_dest = dest ;
hipMalloc( &index, (N+1)*sizeof(uint32_t) ) ; // [0] ~ [N]
hipMalloc( &dest, (E+1)*sizeof(uint32_t) ) ; // +1 avoids empty allocation
hipMemcpy( index, _index, (N+1)*sizeof(uint32_t), hipMemcpyDefault ) ;
hipMemcpy( dest, _dest, (E+1)*sizeof(uint32_t), hipMemcpyDefault ) ;
delete [] _index ;
delete [] _dest ;
}
Edge_Array_Device::~Edge_Array_Device(){
if( index ){
hipFree( index ) ;
index = NULL ;
}
if( dest ){
hipFree( dest ) ;
dest = NULL ;
}
}
|
edge_array_device.cu
|
#include "edge_io_cuda.h"
// index[] points to the begin of dest[]
Edge_Array_Device::Edge_Array_Device( const Edge_List &b ) : Edge_Array(b) {
uint32_t *_index = index ;
uint32_t *_dest = dest ;
cudaMalloc( &index, (N+1)*sizeof(uint32_t) ) ; // [0] ~ [N]
cudaMalloc( &dest, (E+1)*sizeof(uint32_t) ) ; // +1 avoids empty allocation
cudaMemcpy( index, _index, (N+1)*sizeof(uint32_t), cudaMemcpyDefault ) ;
cudaMemcpy( dest, _dest, (E+1)*sizeof(uint32_t), cudaMemcpyDefault ) ;
delete [] _index ;
delete [] _dest ;
}
Edge_Array_Device::~Edge_Array_Device(){
if( index ){
cudaFree( index ) ;
index = NULL ;
}
if( dest ){
cudaFree( dest ) ;
dest = NULL ;
}
}
|
e8f12345ad9b7c04347dbdf3ee371b8ddfde3d7b.hip
|
// !!! This is a file automatically generated by hipify!!!
///
/// Kalman filter track reconstructor in the MPD detector - MnvertLocal function cuda version
/// \author Anna Fatkina
#include "MpdKalmanFilter.h"
#include "MpdKalmanTrack.h"
#include "MpdKalmanHit.h"
#include "MpdKalmanGeoScheme.h"
#include "MpdCodeTimer.h"
#include "FairField.h"
//#include "FairRootManager.h"
#include "FairRunAna.h"
#include "FairTask.h"
#include "MpdConstField.h"
#include "MpdMultiField.h"
#include <TMath.h>
#include <TGeoManager.h>
#include <TClonesArray.h>
#include <TLorentzVector.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <malloc.h>
#define NPP_MINABS_64F ( 1e-322 )
__device__ void L40Func (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k);
__device__ void L50Func (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k);
__device__ void L51Func(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k);
__device__ void L60Func(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail);
__device__ void ElenLeft(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail);
__device__ void MainLoop(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail);
__device__ void ScaleMatrix(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int *localFail);
__device__ void AfterScaleMatrix(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail);
__global__ void MnvertLocal_gpu (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail);
// ****************
__device__ void L40Func (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
//int j = threadIdx.x + 1;
if (j <= km1)
{
localVERTpp_dev[j-1] = a_dev[j + k*l];
localVERTq_dev[j-1] = __dmul_rn(a_dev[j + k*l], localVERTq_dev[k-1]);
a_dev[j + k*l] = 0.0;
}
L50Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, kp1, km1, k);
}
__device__ void L50Func (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k)
{
if (k - n < 0)
{
L51Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, kp1, km1, k);
}
else if (k - n == 0)
{
L60Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail);
}
else
{
ifail[0] = 50; //?????? never
}
}
__device__ void L51Func(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1,int k)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
// int j = threadIdx.x + 1;
if (j >= kp1 && j <= n)
{
localVERTpp_dev[j-1] = a_dev[k + j*l];
localVERTq_dev[j-1] = __dmul_rn(-a_dev[k + j*l],localVERTq_dev[k-1]);
a_dev[k + j*l] = 0.0;
}
L60Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail);
}
__device__ void L60Func(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int k = blockIdx.y * blockDim.y + threadIdx.y + 1;
// int j = threadIdx.x + 1;
// int k = threadIdx.y + 1;
if (j <= n)
{
if (k >= j && k <= n)
{
a_dev[j + k*l] = __dadd_rn(a_dev[j + k*l], __dmul_rn(localVERTpp_dev[j-1],localVERTq_dev[k-1]));
}
}
// ElenLeft(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail);
}
__device__ void ElenLeft(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int k = blockIdx.y * blockDim.y + threadIdx.y + 1;
//int j = threadIdx.x + 1;
//int k = threadIdx.y + 1;
if(j <= n) {
if (k <= j) {
a_dev[k + j*l] = __dmul_rn(__dmul_rn(a_dev[k + j*l],localVERTs_dev[k-1]),localVERTs_dev[j-1]);
a_dev[j + k*l] = a_dev[k + j*l];
}
}
}
__device__ void MainLoop(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
// int i = threadIdx.x + 1;
int kp1, km1, k;
if (i <= n) {
k = i;
//*-*- preparation for elimination step1
if (fabs(a_dev[k + k*l]) < NPP_MINABS_64F)
{
*localFail = 1;
ifail[i-1] = 1;
return;
}
else
{
localVERTq_dev[k-1] = __drcp_rn(a_dev[k + k*l]);
}
localVERTpp_dev[k-1] = 1.0;
a_dev[k + k*l] = 0.0;
kp1 = k + 1;
km1 = k - 1;
if (km1 < 0)
{
ifail[i-1] = 2;
*localFail = 1;
return;
}
else if (km1 == 0)
{
L50Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, kp1, km1, k);
}
else
{
L40Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, kp1, km1, k);
}
}
}
__device__ void AfterScaleMatrix(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
int j = blockIdx.y * blockDim.y + threadIdx.y + 1;
//int i = threadIdx.x + 1;
//int j = threadIdx.y + 1;
if (i <= n)
{
if (j <= n)
{
a_dev[i + j*l] = __dmul_rn(__dmul_rn(a_dev[i + j*l], localVERTs_dev[i-1]), localVERTs_dev[j-1]);
}
}
}
__device__ void ScaleMatrix(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
int j = blockIdx.y * blockDim.y + threadIdx.y + 1;
//int i = threadIdx.x + 1;
//int j = threadIdx.y + 1;
double si;
if (i <= n)
{
si = a_dev[i + i*l];
if (si <= 0)
{
ifail[i-1] = 3;
*localFail=1;
return;
}
localVERTs_dev[i-1] =__drcp_rn(__dsqrt_rn(si)); }
AfterScaleMatrix( a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l,ifail, localFail);
}
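// MnvertLocal_gpu: GPU port of TMinuit's mnvert symmetric-matrix inversion.
// Thread indices are shifted to 1-based (j = blockIdx.x*blockDim.x + threadIdx.x + 1, ...)
// to mirror the 1-based indexing of the TMinuit original. The inversion proceeds in three phases:
//   1) ScaleMatrix: s_i = 1/sqrt(a_ii), a_ij <- a_ij * s_i * s_j (fails if a diagonal entry <= 0),
//   2) MainLoop: in-place elimination using the pp/q work vectors,
//   3) ElenLeft: rescale by s_i * s_j and symmetrize the result.
// Any failure sets *localFail and records a nonzero code in ifail for the offending row.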
__global__ void MnvertLocal_gpu (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int k = blockIdx.y * blockDim.y + threadIdx.y + 1;
//int j =threadIdx.x + 1;
// int k = threadIdx.y + 1;
/*if (j <= n)
{
localVERTs_dev[j-1] = 33;
}
*/
if (j <= n)
{
localVERTs_dev[j-1] = 0.0;
localVERTq_dev[j-1] = 0.0;
localVERTpp_dev[j-1] = 0.0;
ifail[j-1] = 0;
}
*localFail = 0;
ScaleMatrix(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, localFail);
if(*localFail == 1)
{
return;
}
*localFail = 0;
MainLoop(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, localFail);
if(*localFail == 1)
{
return;
}
/*if (j <= n)
{
if (k <= j)
{
a_dev[k + j*l] = a_dev[k + j*l]*localVERTs_dev[k-1]*localVERTs_dev[j-1];
a_dev[j + k*l] = a_dev[k + j*l];
}
}*/
ElenLeft(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail);
}
//__________________________________________________________________________
extern "C" void MnvertLocal_cpu(Double_t *a, Int_t l, Int_t n,
int* ifail)
{
std::cout <<"!!!!!!HRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR!!!!";
hipSetDevice(0);
// taken from TMinuit package of Root (l>=n)
// fVERTs, fVERTq and fVERTpp changed to localVERTs, localVERTq and localVERTpp
// double_t localVERTs[n], localVERTq[n], localVERTpp[n];
/* double_t * localVERTs = new double_t[n];
double_t * localVERTq = new double_t[n];
double_t * localVERTpp = new double_t[n];
std::cout << "default: " << localVERTs[3] << std::endl;
*/
//***************************************************//
//std::cout << "size a = " << _msize(a)/a[0];
double * localVERTs_dev;
double * localVERTq_dev;
double * localVERTpp_dev;
double * a_dev;
int * ifail_dev;
hipMalloc((void**)&ifail_dev, n*sizeof(int));
hipMalloc((void**)&a_dev, n*n*sizeof(double));
hipMalloc((void**)&localVERTs_dev, n*sizeof(double));
hipMalloc((void**)&localVERTq_dev, n*sizeof(double));
hipMalloc((void**)&localVERTpp_dev, n*sizeof(double));
double * localVERTs_host;
double * localVERTq_host;
double * localVERTpp_host;
double * a_host;
int * ifail_host;
ifail_host = (int*)malloc(n*sizeof(int));
a_host = (double*)malloc(n*n*sizeof(double));
localVERTs_host = (double*)malloc(n*sizeof(double));
localVERTq_host = (double*)malloc(n*sizeof(double));
localVERTpp_host = (double*)malloc(n*sizeof(double));
hipDeviceSynchronize();
//**************************************************//
/* int k=1;
int *i;
i=&k;
std::cout<<*i;
*/
// fMaxint changed to localMaxint
Int_t localMaxint = n;
/* System generated locals */
Int_t aOffset;
/* Local variables */
//double_t si;
//Int_t kp1, km1;
aOffset = l + 1;
a -= aOffset;
for (int p = 0; p <n*n; ++p)
{
a_host[p] = (double)(a[p]) ;
}
hipMemcpy(a_dev, a_host, n*n*sizeof(double), hipMemcpyHostToDevice);
std::cout << "after memcopy to host from host, a:" << std::endl;
for (int p = 0; p <n*n; ++p)
{
std::cout << a_host[p] << " ";
}
int * localFail;
hipMalloc((void**)&localFail, sizeof(int));
//*localFail = 0;
std::cout << std::endl;
hipDeviceSynchronize();
/* Function Body */
*ifail = 0;
std::cout << "ifail making 0" << std::endl;
if (n < 1) goto L100;
if (n > localMaxint) goto L100;
std::cout << "Before cuda func " << std::endl;
hipLaunchKernelGGL(( MnvertLocal_gpu), dim3(n), dim3(n), 0, 0, a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, (int)n, (int)l, ifail_dev, localFail);
hipDeviceSynchronize();
hipMemcpy(localVERTs_host, localVERTs_dev, n*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(localVERTq_host, localVERTq_dev, n*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(localVERTpp_host, localVERTpp_dev, n*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(a_host, a_dev, n*n*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(ifail_host, ifail_dev, n*sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
std::cout << "after memcopy, ifail:" << std::endl;
for(int p = 0; p < n; ++p)
{
std::cout << ifail_host[p] << " ";
}
//ifail = *ifail_host;
std::cout << "after ifail, a:" << std::endl;
for (int p = 0; p <n*n; ++p)
{
a[p] = (Double_t)(a_host[p]) ;
std::cout << a[p] << " ";
}
std::cout << "after changing a" << std::endl;
/* for (int p = 0; p < n; p++)
{
localVERTs[p] = (double_t)localVERTs_host[p];
localVERTq[p] = (double_t)localVERTq_host[p];
localVERTpp[p] = (double_t)localVERTpp_host[p];
}
*/std::cout << "after changing 3 arrays" << std::endl;
/* for (int p = 0; p < n; p++)
{
std::cout << localVERTs[p] << " " << localVERTq[p] << " " << localVERTpp[p] << std::endl;
}
std::cout << "*****************************----------------******************"<< std::endl;
for(int p = 0; p < n*n; p++)
{
std::cout << a[p] << " ";
}*/
/* for (int p = 0; p <n*n; ++p)
{
a[p] = (double_t)(a_host[p]) ;
}*/
for(int p = 0; p < n; p++)
{
std::cout << ifail_host[p] << " ";
if (ifail_host[p] != 0)
{
*ifail = 1;
//break;
}
}
std::cout<< "after ifail count" << std::endl;
for (int p = 0; p < n; p++)
{
std::cout << localVERTs_host[p] << " " << localVERTq_host[p] << " " << localVERTpp_host[p] << std::endl;
}
std::cout << "*****************************----------------******************"<< std::endl;
hipFree(localVERTs_dev);
hipFree(localVERTq_dev);
hipFree(localVERTpp_dev);
hipFree(a_dev);
hipFree(ifail_dev);
hipFree(localFail);
free(localVERTs_host);
free(localVERTq_host);
free(localVERTpp_host);
free(a_host);
free(ifail_host);
/* delete [] localVERTs;
delete [] localVERTq;
delete [] localVERTpp;
*/
return;
//*-*- failure return
L100:
std::cout << "IF IFAIL:" << std::endl;
for (int p = 0; p < n; p++)
{
std::cout << localVERTs_host[p] << " " << localVERTq_host[p] << " " << localVERTpp_host[p] << std::endl;
}
std::cout << "*****************************----------------******************"<< std::endl;
/*for (int p = 0; p < n; p++)
{
std::cout << localVERTs[p] << " " << localVERTq[p] << " " << localVERTpp[p] << std::endl;
}
std::cout << "*****************************----------------******************"<< std::endl;
for(int p = 0; p < n*n; p++)
{
std::cout << a[p] << " ";
}*/
hipFree(localVERTs_dev);
hipFree(localVERTq_dev);
hipFree(localVERTpp_dev);
hipFree(a_dev);
hipFree(ifail_dev);
hipFree(localFail);
free(localVERTs_host);
free(localVERTq_host);
free(localVERTpp_host);
free(a_host);
free(ifail_host);
/* delete [] localVERTs;
delete [] localVERTq;
delete [] localVERTpp;
*/
*ifail = 1;
} /* mnvertLocal */
|
e8f12345ad9b7c04347dbdf3ee371b8ddfde3d7b.cu
|
///
/// Kalman filter track reconstructor in the MPD detector - MnvertLocal function cuda version
/// \author Anna Fatkina
#include "MpdKalmanFilter.h"
#include "MpdKalmanTrack.h"
#include "MpdKalmanHit.h"
#include "MpdKalmanGeoScheme.h"
#include "MpdCodeTimer.h"
#include "FairField.h"
//#include "FairRootManager.h"
#include "FairRunAna.h"
#include "FairTask.h"
#include "MpdConstField.h"
#include "MpdMultiField.h"
#include <TMath.h>
#include <TGeoManager.h>
#include <TClonesArray.h>
#include <TLorentzVector.h>
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <malloc.h>
#define NPP_MINABS_64F ( 1e-322 )
__device__ void L40Func (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k);
__device__ void L50Func (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k);
__device__ void L51Func(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k);
__device__ void L60Func(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail);
__device__ void ElenLeft(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail);
__device__ void MainLoop(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail);
__device__ void ScaleMatrix(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int *localFail);
__device__ void AfterScaleMatrix(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail);
__global__ void MnvertLocal_gpu (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail);
// ****************
__device__ void L40Func (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
//int j = threadIdx.x + 1;
if (j <= km1)
{
localVERTpp_dev[j-1] = a_dev[j + k*l];
localVERTq_dev[j-1] = __dmul_rn(a_dev[j + k*l], localVERTq_dev[k-1]);
a_dev[j + k*l] = 0.0;
}
L50Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, kp1, km1, k);
}
__device__ void L50Func (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1, int k)
{
if (k - n < 0)
{
L51Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, kp1, km1, k);
}
else if (k - n == 0)
{
L60Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail);
}
else
{
ifail[0] = 50; // k > n: should never be reached
}
}
__device__ void L51Func(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int kp1, int km1,int k)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
// int j = threadIdx.x + 1;
if (j >= kp1 && j <= n)
{
localVERTpp_dev[j-1] = a_dev[k + j*l];
localVERTq_dev[j-1] = __dmul_rn(-a_dev[k + j*l],localVERTq_dev[k-1]);
a_dev[k + j*l] = 0.0;
}
L60Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail);
}
__device__ void L60Func(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int k = blockIdx.y * blockDim.y + threadIdx.y + 1;
// int j = threadIdx.x + 1;
// int k = threadIdx.y + 1;
if (j <= n)
{
if (k >= j && k <= n)
{
a_dev[j + k*l] = __dadd_rn(a_dev[j + k*l], __dmul_rn(localVERTpp_dev[j-1],localVERTq_dev[k-1]));
}
}
// ElenLeft(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail);
}
__device__ void ElenLeft(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int k = blockIdx.y * blockDim.y + threadIdx.y + 1;
//int j = threadIdx.x + 1;
//int k = threadIdx.y + 1;
if(j <= n) {
if (k <= j) {
a_dev[k + j*l] = __dmul_rn(__dmul_rn(a_dev[k + j*l],localVERTs_dev[k-1]),localVERTs_dev[j-1]);
a_dev[j + k*l] = a_dev[k + j*l];
}
}
}
__device__ void MainLoop(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
// int i = threadIdx.x + 1;
int kp1, km1, k;
if (i <= n) {
k = i;
//*-*- preparation for elimination step1
if (fabs(a_dev[k + k*l]) < NPP_MINABS_64F)
{
*localFail = 1;
ifail[i-1] = 1;
return;
}
else
{
localVERTq_dev[k-1] = __drcp_rn(a_dev[k + k*l]);
}
localVERTpp_dev[k-1] = 1.0;
a_dev[k + k*l] = 0.0;
kp1 = k + 1;
km1 = k - 1;
if (km1 < 0)
{
ifail[i-1] = 2;
*localFail = 1;
return;
}
else if (km1 == 0)
{
L50Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, kp1, km1, k);
}
else
{
L40Func(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, kp1, km1, k);
}
}
}
__device__ void AfterScaleMatrix(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
int j = blockIdx.y * blockDim.y + threadIdx.y + 1;
//int i = threadIdx.x + 1;
//int j = threadIdx.y + 1;
if (i <= n)
{
if (j <= n)
{
a_dev[i + j*l] = __dmul_rn(__dmul_rn(a_dev[i + j*l], localVERTs_dev[i-1]), localVERTs_dev[j-1]);
}
}
}
__device__ void ScaleMatrix(double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
int j = blockIdx.y * blockDim.y + threadIdx.y + 1;
//int i = threadIdx.x + 1;
//int j = threadIdx.y + 1;
double si;
if (i <= n)
{
si = a_dev[i + i*l];
if (si <= 0)
{
ifail[i-1] = 3;
*localFail=1;
return;
}
localVERTs_dev[i-1] =__drcp_rn(__dsqrt_rn(si)); }
AfterScaleMatrix( a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l,ifail, localFail);
}
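// MnvertLocal_gpu: GPU port of TMinuit's mnvert symmetric-matrix inversion.
// Thread indices are shifted to 1-based (j = blockIdx.x*blockDim.x + threadIdx.x + 1, ...)
// to mirror the 1-based indexing of the TMinuit original. The inversion proceeds in three phases:
//   1) ScaleMatrix: s_i = 1/sqrt(a_ii), a_ij <- a_ij * s_i * s_j (fails if a diagonal entry <= 0),
//   2) MainLoop: in-place elimination using the pp/q work vectors,
//   3) ElenLeft: rescale by s_i * s_j and symmetrize the result.
// Any failure sets *localFail and records a nonzero code in ifail for the offending row.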
__global__ void MnvertLocal_gpu (double* a_dev, double* localVERTs_dev, double* localVERTq_dev, double* localVERTpp_dev, int n, int l, int *ifail, int* localFail)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int k = blockIdx.y * blockDim.y + threadIdx.y + 1;
//int j =threadIdx.x + 1;
// int k = threadIdx.y + 1;
/*if (j <= n)
{
localVERTs_dev[j-1] = 33;
}
*/
if (j <= n)
{
localVERTs_dev[j-1] = 0.0;
localVERTq_dev[j-1] = 0.0;
localVERTpp_dev[j-1] = 0.0;
ifail[j-1] = 0;
}
*localFail = 0;
ScaleMatrix(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, localFail);
if(*localFail == 1)
{
return;
}
*localFail = 0;
MainLoop(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail, localFail);
if(*localFail == 1)
{
return;
}
/*if (j <= n)
{
if (k <= j)
{
a_dev[k + j*l] = a_dev[k + j*l]*localVERTs_dev[k-1]*localVERTs_dev[j-1];
a_dev[j + k*l] = a_dev[k + j*l];
}
}*/
ElenLeft(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, n, l, ifail);
}
//__________________________________________________________________________
extern "C" void MnvertLocal_cpu(Double_t *a, Int_t l, Int_t n,
int* ifail)
{
std::cout <<"!!!!!!HRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR!!!!";
cudaSetDevice(0);
// taken from TMinuit package of Root (l>=n)
// fVERTs, fVERTq and fVERTpp changed to localVERTs, localVERTq and localVERTpp
// double_t localVERTs[n], localVERTq[n], localVERTpp[n];
/* double_t * localVERTs = new double_t[n];
double_t * localVERTq = new double_t[n];
double_t * localVERTpp = new double_t[n];
std::cout << "default: " << localVERTs[3] << std::endl;
*/
//***************************************************//
//std::cout << "size a = " << _msize(a)/a[0];
double * localVERTs_dev;
double * localVERTq_dev;
double * localVERTpp_dev;
double * a_dev;
int * ifail_dev;
cudaMalloc((void**)&ifail_dev, n*sizeof(int));
cudaMalloc((void**)&a_dev, n*n*sizeof(double));
cudaMalloc((void**)&localVERTs_dev, n*sizeof(double));
cudaMalloc((void**)&localVERTq_dev, n*sizeof(double));
cudaMalloc((void**)&localVERTpp_dev, n*sizeof(double));
double * localVERTs_host;
double * localVERTq_host;
double * localVERTpp_host;
double * a_host;
int * ifail_host;
ifail_host = (int*)malloc(n*sizeof(int));
a_host = (double*)malloc(n*n*sizeof(double));
localVERTs_host = (double*)malloc(n*sizeof(double));
localVERTq_host = (double*)malloc(n*sizeof(double));
localVERTpp_host = (double*)malloc(n*sizeof(double));
cudaDeviceSynchronize();
//**************************************************//
/* int k=1;
int *i;
i=&k;
std::cout<<*i;
*/
// fMaxint changed to localMaxint
Int_t localMaxint = n;
/* System generated locals */
Int_t aOffset;
/* Local variables */
//double_t si;
//Int_t kp1, km1;
aOffset = l + 1;
a -= aOffset;
for (int p = 0; p <n*n; ++p)
{
a_host[p] = (double)(a[p]) ;
}
cudaMemcpy(a_dev, a_host, n*n*sizeof(double), cudaMemcpyHostToDevice);
std::cout << "after memcopy to host from host, a:" << std::endl;
for (int p = 0; p <n*n; ++p)
{
std::cout << a_host[p] << " ";
}
int * localFail;
cudaMalloc((void**)&localFail, sizeof(int));
//*localFail = 0;
std::cout << std::endl;
cudaDeviceSynchronize();
/* Function Body */
*ifail = 0;
std::cout << "ifail making 0" << std::endl;
if (n < 1) goto L100;
if (n > localMaxint) goto L100;
std::cout << "Before cuda func " << std::endl;
MnvertLocal_gpu<<< n, n>>>(a_dev, localVERTs_dev, localVERTq_dev, localVERTpp_dev, (int)n, (int)l, ifail_dev, localFail);
cudaDeviceSynchronize();
cudaMemcpy(localVERTs_host, localVERTs_dev, n*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(localVERTq_host, localVERTq_dev, n*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(localVERTpp_host, localVERTpp_dev, n*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(a_host, a_dev, n*n*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(ifail_host, ifail_dev, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
std::cout << "after memcopy, ifail:" << std::endl;
for(int p = 0; p < n; ++p)
{
std::cout << ifail_host[p] << " ";
}
//ifail = *ifail_host;
std::cout << "after ifail, a:" << std::endl;
for (int p = 0; p <n*n; ++p)
{
a[p] = (Double_t)(a_host[p]) ;
std::cout << a[p] << " ";
}
std::cout << "after changing a" << std::endl;
/* for (int p = 0; p < n; p++)
{
localVERTs[p] = (double_t)localVERTs_host[p];
localVERTq[p] = (double_t)localVERTq_host[p];
localVERTpp[p] = (double_t)localVERTpp_host[p];
}
*/std::cout << "after changing 3 arrays" << std::endl;
/* for (int p = 0; p < n; p++)
{
std::cout << localVERTs[p] << " " << localVERTq[p] << " " << localVERTpp[p] << std::endl;
}
std::cout << "*****************************----------------******************"<< std::endl;
for(int p = 0; p < n*n; p++)
{
std::cout << a[p] << " ";
}*/
/* for (int p = 0; p <n*n; ++p)
{
a[p] = (double_t)(a_host[p]) ;
}*/
for(int p = 0; p < n; p++)
{
std::cout << ifail_host[p] << " ";
if (ifail_host[p] != 0)
{
*ifail = 1;
//break;
}
}
std::cout<< "after ifail count" << std::endl;
for (int p = 0; p < n; p++)
{
std::cout << localVERTs_host[p] << " " << localVERTq_host[p] << " " << localVERTpp_host[p] << std::endl;
}
std::cout << "*****************************----------------******************"<< std::endl;
cudaFree(localVERTs_dev);
cudaFree(localVERTq_dev);
cudaFree(localVERTpp_dev);
cudaFree(a_dev);
cudaFree(ifail_dev);
cudaFree(localFail);
free(localVERTs_host);
free(localVERTq_host);
free(localVERTpp_host);
free(a_host);
free(ifail_host);
/* delete [] localVERTs;
delete [] localVERTq;
delete [] localVERTpp;
*/
return;
//*-*- failure return
L100:
std::cout << "IF IFAIL:" << std::endl;
for (int p = 0; p < n; p++)
{
std::cout << localVERTs_host[p] << " " << localVERTq_host[p] << " " << localVERTpp_host[p] << std::endl;
}
std::cout << "*****************************----------------******************"<< std::endl;
/*for (int p = 0; p < n; p++)
{
std::cout << localVERTs[p] << " " << localVERTq[p] << " " << localVERTpp[p] << std::endl;
}
std::cout << "*****************************----------------******************"<< std::endl;
for(int p = 0; p < n*n; p++)
{
std::cout << a[p] << " ";
}*/
cudaFree(localVERTs_dev);
cudaFree(localVERTq_dev);
cudaFree(localVERTpp_dev);
cudaFree(a_dev);
cudaFree(ifail_dev);
cudaFree(localFail);
free(localVERTs_host);
free(localVERTq_host);
free(localVERTpp_host);
free(a_host);
free(ifail_host);
/* delete [] localVERTs;
delete [] localVERTq;
delete [] localVERTpp;
*/
*ifail = 1;
} /* mnvertLocal */
|
498ca8d1484d996ad4f994a7043bd9c068364801.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
#include <stdlib.h> // for EXIT_SUCCESS
const int N = 16;
const int blocksize = 16;
__global__ void hello(char *a, int *b){
a[threadIdx.x] += b[threadIdx.x];
}
int main(){
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
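// The per-character offsets map "Hello " onto "World!":
// 'H'+15='W', 'e'+10='o', 'l'+6='r', 'l'+0='l', 'o'-11='d', ' '+1='!'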
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
hipMalloc( (void**)&ad, csize );
hipMalloc( (void**)&bd, isize );
hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
hipFree( ad );
hipFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
}
|
498ca8d1484d996ad4f994a7043bd9c068364801.cu
|
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010
#include <stdio.h>
#include <stdlib.h> // for EXIT_SUCCESS
const int N = 16;
const int blocksize = 16;
__global__ void hello(char *a, int *b){
a[threadIdx.x] += b[threadIdx.x];
}
int main(){
char a[N] = "Hello \0\0\0\0\0\0";
int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
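// The per-character offsets map "Hello " onto "World!":
// 'H'+15='W', 'e'+10='o', 'l'+6='r', 'l'+0='l', 'o'-11='d', ' '+1='!'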
char *ad;
int *bd;
const int csize = N*sizeof(char);
const int isize = N*sizeof(int);
printf("%s", a);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&bd, isize );
cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hello<<<dimGrid, dimBlock>>>(ad, bd);
cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( bd );
printf("%s\n", a);
return EXIT_SUCCESS;
}
|
5a1c135fc7068c31967d5e0f4a643edd72f7a8a5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Algorithm 7.4 in Nocedal
//
// Author: Dai-Ni Hsieh ([email protected])
// Date : 05/20/2020
#include <rocblas.h>
#include "constants.h"
__global__ void vectorxpay(double *d_xVec, double aVal, double *d_yVec, int varNum)
{
int varIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( varIdx < varNum )
d_xVec[varIdx] += aVal * d_yVec[varIdx];
return;
}
__global__ void vectorScale(double *d_xVec, double scale, int varNum)
{
int varIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( varIdx < varNum )
d_xVec[varIdx] *= scale;
return;
}
void getDirection(double *d_dirVec, double HIniVal, double *d_grdNow, double *d_sMat, double *d_yMat,
int newIdx, int hisNum, int memNum, double *h_alpVec, int varNum, hipblasHandle_t blasHdl)
{
// s = x_next - x_now = dspVec
// y = (grad f)_next - (grad f)_now = dgdVec
// rho = 1 / (s^T y)
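// Two-loop recursion implemented here (Nocedal & Wright, Algorithm 7.4):
//   q = grad f_now
//   for i = newest..oldest: alpha_i = (s_i^T q) / (s_i^T y_i); q -= alpha_i * y_i
//   r = HIniVal * q (initial Hessian approximation H0 = HIniVal * I)
//   for i = oldest..newest: beta_i = (y_i^T r) / (s_i^T y_i); r += (alpha_i - beta_i) * s_i
//   search direction d_dirVec = -r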
int blkNum = (varNum - 1) / BLKDIM + 1;
hipMemcpy(d_dirVec, d_grdNow, sizeof(double) * varNum, hipMemcpyDeviceToDevice);
for ( int hisCnt = 0; hisCnt < hisNum; ++hisCnt )
{
int hisIdx = newIdx - hisCnt;
if ( hisIdx < 0 ) hisIdx += memNum;
double *d_sVec = d_sMat + hisIdx * varNum;
double *d_yVec = d_yMat + hisIdx * varNum;
double h_sTq, h_sTy;
hipblasDdot(blasHdl, varNum, d_sVec, 1, d_dirVec, 1, &h_sTq);
hipblasDdot(blasHdl, varNum, d_sVec, 1, d_yVec, 1, &h_sTy);
double h_alpVal = h_sTq / h_sTy;
hipLaunchKernelGGL(( vectorxpay) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dirVec, -h_alpVal, d_yVec, varNum);
h_alpVec[hisIdx] = h_alpVal;
}
hipLaunchKernelGGL(( vectorScale) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dirVec, HIniVal, varNum);
int oldIdx = (hisNum < memNum ? 0 : newIdx + 1);
if ( oldIdx == memNum ) oldIdx = 0;
for ( int hisCnt = 0; hisCnt < hisNum; ++hisCnt )
{
int hisIdx = oldIdx + hisCnt;
if ( hisIdx >= memNum ) hisIdx -= memNum;
double h_alpVal = h_alpVec[hisIdx];
double *d_sVec = d_sMat + hisIdx * varNum;
double *d_yVec = d_yMat + hisIdx * varNum;
double h_yTr, h_sTy;
hipblasDdot(blasHdl, varNum, d_yVec, 1, d_dirVec, 1, &h_yTr);
hipblasDdot(blasHdl, varNum, d_sVec, 1, d_yVec, 1, &h_sTy);
double h_btaVal = h_yTr / h_sTy;
hipLaunchKernelGGL(( vectorxpay) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dirVec, h_alpVal - h_btaVal, d_sVec, varNum);
}
hipLaunchKernelGGL(( vectorScale) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dirVec, -1.0, varNum);
return;
}
|
5a1c135fc7068c31967d5e0f4a643edd72f7a8a5.cu
|
// Algorithm 7.4 in Nocedal
//
// Author: Dai-Ni Hsieh ([email protected])
// Date : 05/20/2020
#include <cublas_v2.h>
#include "constants.h"
__global__ void vectorxpay(double *d_xVec, double aVal, double *d_yVec, int varNum)
{
int varIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( varIdx < varNum )
d_xVec[varIdx] += aVal * d_yVec[varIdx];
return;
}
__global__ void vectorScale(double *d_xVec, double scale, int varNum)
{
int varIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( varIdx < varNum )
d_xVec[varIdx] *= scale;
return;
}
void getDirection(double *d_dirVec, double HIniVal, double *d_grdNow, double *d_sMat, double *d_yMat,
int newIdx, int hisNum, int memNum, double *h_alpVec, int varNum, cublasHandle_t blasHdl)
{
// s = x_next - x_now = dspVec
// y = (grad f)_next - (grad f)_now = dgdVec
// rho = 1 / (s^T y)
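// Two-loop recursion implemented here (Nocedal & Wright, Algorithm 7.4):
//   q = grad f_now
//   for i = newest..oldest: alpha_i = (s_i^T q) / (s_i^T y_i); q -= alpha_i * y_i
//   r = HIniVal * q (initial Hessian approximation H0 = HIniVal * I)
//   for i = oldest..newest: beta_i = (y_i^T r) / (s_i^T y_i); r += (alpha_i - beta_i) * s_i
//   search direction d_dirVec = -r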
int blkNum = (varNum - 1) / BLKDIM + 1;
cudaMemcpy(d_dirVec, d_grdNow, sizeof(double) * varNum, cudaMemcpyDeviceToDevice);
for ( int hisCnt = 0; hisCnt < hisNum; ++hisCnt )
{
int hisIdx = newIdx - hisCnt;
if ( hisIdx < 0 ) hisIdx += memNum;
double *d_sVec = d_sMat + hisIdx * varNum;
double *d_yVec = d_yMat + hisIdx * varNum;
double h_sTq, h_sTy;
cublasDdot(blasHdl, varNum, d_sVec, 1, d_dirVec, 1, &h_sTq);
cublasDdot(blasHdl, varNum, d_sVec, 1, d_yVec, 1, &h_sTy);
double h_alpVal = h_sTq / h_sTy;
vectorxpay <<<blkNum, BLKDIM>>> (d_dirVec, -h_alpVal, d_yVec, varNum);
h_alpVec[hisIdx] = h_alpVal;
}
vectorScale <<<blkNum, BLKDIM>>> (d_dirVec, HIniVal, varNum);
int oldIdx = (hisNum < memNum ? 0 : newIdx + 1);
if ( oldIdx == memNum ) oldIdx = 0;
for ( int hisCnt = 0; hisCnt < hisNum; ++hisCnt )
{
int hisIdx = oldIdx + hisCnt;
if ( hisIdx >= memNum ) hisIdx -= memNum;
double h_alpVal = h_alpVec[hisIdx];
double *d_sVec = d_sMat + hisIdx * varNum;
double *d_yVec = d_yMat + hisIdx * varNum;
double h_yTr, h_sTy;
cublasDdot(blasHdl, varNum, d_yVec, 1, d_dirVec, 1, &h_yTr);
cublasDdot(blasHdl, varNum, d_sVec, 1, d_yVec, 1, &h_sTy);
double h_btaVal = h_yTr / h_sTy;
vectorxpay <<<blkNum, BLKDIM>>> (d_dirVec, h_alpVal - h_btaVal, d_sVec, varNum);
}
vectorScale <<<blkNum, BLKDIM>>> (d_dirVec, -1.0, varNum);
return;
}
|
422281fbe68c0626a969463f5734cc7d144ea9d5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/user/kernels/softmax_kernel_util.h"
#include <hipcub/hipcub.hpp>
namespace oneflow {
namespace {
constexpr int64_t kSoftmaxGpuBlockSize = 256;
template<typename T>
struct SoftmaxUtil {
using ComputeType = T;
__device__ static ComputeType ToComputeType(T v) { return v; }
__device__ static T FromComputeType(ComputeType v) { return v; }
};
template<>
struct SoftmaxUtil<half> {
using ComputeType = float;
__device__ static ComputeType ToComputeType(half v) { return __half2float(v); }
__device__ static half FromComputeType(ComputeType v) { return __float2half(v); }
};
__device__ double Exp(double x) { return exp(x); }
__device__ float Exp(float x) { return expf(x); }
template<typename T>
int GetForwardDynamicSharedMemorySize(const int num_classes) {
return num_classes * sizeof(typename SoftmaxUtil<T>::ComputeType);
}
template<typename T>
int GetBackwardDynamicSharedMemorySize(const int num_classes) {
return 2 * num_classes * sizeof(typename SoftmaxUtil<T>::ComputeType);
}
int GetSoftmaxBlockSize() { return kSoftmaxGpuBlockSize; }
int GetSoftmaxNumBlocks(const int num_instances) {
return ::min(static_cast<int>(num_instances), kCudaMaxBlocksNum);
}
template<typename T>
int GetMinNumClasses() {
return 32;
}
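// SoftmaxGpuForwardImpl: rows (instances) are distributed over blocks in a grid-stride loop.
// Each row is staged in dynamic shared memory and processed in three passes:
//   1) load x and block-reduce the row maximum,
//   2) compute exp(x - max) in place and block-reduce the row sum,
//   3) divide by the sum and write the probabilities.
// Subtracting the row maximum first keeps exp() from overflowing (numerically stable softmax).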
template<typename T>
__global__ void SoftmaxGpuForwardImpl(const int num_instances, const int num_classes, const T* in,
T* prob) {
using SU = SoftmaxUtil<T>;
using ComputeType = typename SU::ComputeType;
extern __shared__ __align__(sizeof(ComputeType)) unsigned char fw_shared_buf[];
auto* compute_buf = reinterpret_cast<ComputeType*>(fw_shared_buf);
__shared__ ComputeType row_reduce_result;
typedef hipcub::BlockReduce<ComputeType, kSoftmaxGpuBlockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage;
const int tid = threadIdx.x;
for (int row = blockIdx.x; row < num_instances; row += gridDim.x) {
const int row_offset = row * num_classes;
const T* in_row = in + row_offset;
T* prob_row = prob + row_offset;
ComputeType thread_max = GetMinVal<ComputeType>();
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
const ComputeType x = SU::ToComputeType(in_row[col]);
compute_buf[col] = x;
thread_max = max(thread_max, x);
}
__syncthreads();
ComputeType block_max = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_max, hipcub::Max());
if (tid == 0) { row_reduce_result = block_max; }
__syncthreads();
const ComputeType row_max_t = row_reduce_result;
ComputeType thread_sum = 0;
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
const ComputeType exp_x = Exp(compute_buf[col] - row_max_t);
compute_buf[col] = exp_x;
thread_sum += exp_x;
}
__syncthreads();
ComputeType block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_sum, hipcub::Sum());
if (tid == 0) { row_reduce_result = block_sum; }
__syncthreads();
const ComputeType row_sum_t = row_reduce_result;
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
prob_row[col] = SU::FromComputeType(compute_buf[col] / row_sum_t);
}
}
}
template<typename T>
void SoftmaxForwardGpu(DeviceCtx* ctx, const int num_instances, const int num_classes, const T* in,
T* prob) {
hipLaunchKernelGGL(( SoftmaxGpuForwardImpl), dim3(GetSoftmaxNumBlocks(num_instances)), dim3(GetSoftmaxBlockSize()),
GetForwardDynamicSharedMemorySize<T>(num_classes), ctx->cuda_stream(),
num_instances, num_classes, in, prob);
}
template<>
void SoftmaxForwardGpu<float16>(DeviceCtx* ctx, const int num_instances, const int num_classes,
const float16* in, float16* prob) {
SoftmaxForwardGpu<half>(ctx, num_instances, num_classes, reinterpret_cast<const half*>(in),
reinterpret_cast<half*>(prob));
}
template<typename T>
int GetForwardFusedKernelMaxActiveBlocks(const int num_classes) {
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, SoftmaxGpuForwardImpl<T>, GetSoftmaxBlockSize(),
GetForwardDynamicSharedMemorySize<T>(num_classes)));
return max_active_blocks;
}
template<>
int GetForwardFusedKernelMaxActiveBlocks<float16>(const int num_classes) {
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, SoftmaxGpuForwardImpl<half>, GetSoftmaxBlockSize(),
GetForwardDynamicSharedMemorySize<half>(num_classes)));
return max_active_blocks;
}
template<typename T>
bool IsForwardFusedKernelSupported(const int num_classes) {
if (num_classes >= GetMinNumClasses<T>()
&& GetForwardFusedKernelMaxActiveBlocks<T>(num_classes) > 0) {
return true;
} else {
return false;
}
}
template<typename T>
__global__ void SoftmaxGpuBackwardImpl(const int num_instances, const int num_classes, const T* dy,
const T* prob, T* dx) {
using SU = SoftmaxUtil<T>;
using ComputeType = typename SU::ComputeType;
extern __shared__ __align__(sizeof(ComputeType)) unsigned char bw_shared_buf[];
auto* dy_buf = reinterpret_cast<ComputeType*>(bw_shared_buf);
auto* prob_buf =
reinterpret_cast<ComputeType*>(bw_shared_buf + num_classes * sizeof(ComputeType));
__shared__ ComputeType row_reduce_result;
typedef hipcub::BlockReduce<ComputeType, kSoftmaxGpuBlockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage;
const int tid = threadIdx.x;
for (int row = blockIdx.x; row < num_instances; row += gridDim.x) {
const int row_offset = row * num_classes;
const T* dy_row = dy + row_offset;
const T* prob_row = prob + row_offset;
T* dx_row = dx + row_offset;
ComputeType thread_sum = 0;
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
const ComputeType dy_col = SU::ToComputeType(dy_row[col]);
dy_buf[col] = dy_col;
const ComputeType prob_col = SU::ToComputeType(prob_row[col]);
prob_buf[col] = prob_col;
thread_sum += (dy_col * prob_col);
}
__syncthreads();
ComputeType block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_sum, hipcub::Sum());
if (tid == 0) { row_reduce_result = block_sum; }
__syncthreads();
const ComputeType row_sum_t = row_reduce_result;
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
dx_row[col] = SU::FromComputeType((dy_buf[col] - row_sum_t) * prob_buf[col]);
}
}
}
template<typename T>
void SoftmaxBackwardGpu(DeviceCtx* ctx, const int num_instances, const int num_classes, const T* in,
const T* prob, T* dx) {
hipLaunchKernelGGL(( SoftmaxGpuBackwardImpl), dim3(GetSoftmaxNumBlocks(num_instances)), dim3(GetSoftmaxBlockSize()),
GetBackwardDynamicSharedMemorySize<T>(num_classes),
ctx->cuda_stream(), num_instances, num_classes, in, prob, dx);
}
template<>
void SoftmaxBackwardGpu<float16>(DeviceCtx* ctx, const int num_instances, const int num_classes,
const float16* in, const float16* prob, float16* dx) {
SoftmaxBackwardGpu<half>(ctx, num_instances, num_classes, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(prob), reinterpret_cast<half*>(dx));
}
template<typename T>
int GetBackwardFusedKernelMaxActiveBlocks(const int num_classes) {
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, SoftmaxGpuBackwardImpl<T>, GetSoftmaxBlockSize(),
GetBackwardDynamicSharedMemorySize<T>(num_classes)));
return max_active_blocks;
}
template<>
int GetBackwardFusedKernelMaxActiveBlocks<float16>(const int num_classes) {
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, SoftmaxGpuBackwardImpl<half>, GetSoftmaxBlockSize(),
GetBackwardDynamicSharedMemorySize<half>(num_classes)));
return max_active_blocks;
}
template<typename T>
bool IsBackwardFusedKernelSupported(const int num_classes) {
if (num_classes >= GetMinNumClasses<T>()
&& GetBackwardFusedKernelMaxActiveBlocks<T>(num_classes) > 0) {
return true;
} else {
return false;
}
}
template<typename T>
class SoftmaxKernel final : public user_op::OpKernel {
public:
SoftmaxKernel() = default;
~SoftmaxKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const ShapeView& in_shape = in->shape();
const int64_t num_classes = in_shape.At(in_shape.NumAxes() - 1);
const int64_t num_instances = in_shape.Count(0, in_shape.NumAxes() - 1);
if (IsForwardFusedKernelSupported<T>(num_classes)) {
SoftmaxForwardGpu<T>(ctx->device_ctx(), num_instances, num_classes, in->dptr<T>(),
out->mut_dptr<T>());
} else {
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
SoftmaxKernelUtil<DeviceType::kGPU, T>::ComputeProb(
ctx->device_ctx(), num_instances, num_classes, in->dptr<T>(), out->mut_dptr<T>(),
tmp_buffer->mut_dptr(), tmp_buffer->shape().elem_cnt());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_SOFTMAX_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("softmax") \
.SetCreateFn<SoftmaxKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
const int64_t num_classes = in_shape->At(in_shape->NumAxes() - 1); \
const int64_t num_instances = in_shape->Count(0, in_shape->NumAxes() - 1); \
return SoftmaxKernelUtil<DeviceType::kGPU, dtype>::GetComputeProbTempStorageSizeInBytes( \
num_instances, num_classes); \
});
REGISTER_SOFTMAX_GPU_KERNEL(float16)
REGISTER_SOFTMAX_GPU_KERNEL(float)
REGISTER_SOFTMAX_GPU_KERNEL(double)
#undef REGISTER_SOFTMAX_GPU_KERNEL
template<typename T>
class SoftmaxGradKernel final : public user_op::OpKernel {
public:
SoftmaxGradKernel() = default;
~SoftmaxGradKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const int64_t num_classes = y->shape().At(y->shape().NumAxes() - 1);
const int64_t num_instances = y->shape().elem_cnt() / num_classes;
if (IsBackwardFusedKernelSupported<T>(num_classes)) {
SoftmaxBackwardGpu<T>(ctx->device_ctx(), num_instances, num_classes, dy->dptr<T>(),
y->dptr<T>(), dx->mut_dptr<T>());
} else {
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
SoftmaxKernelUtil<DeviceType::kGPU, T>::ComputeDiff(
ctx->device_ctx(), num_instances, num_classes, dy->dptr<T>(), y->dptr<T>(),
dx->mut_dptr<T>(), tmp_buffer->mut_dptr(), tmp_buffer->shape().elem_cnt());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_SOFTMAX_GRAD_KERNEL(dtype) \
REGISTER_USER_KERNEL("softmax_grad") \
.SetCreateFn<SoftmaxGradKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* dy_shape = ctx->Shape4ArgNameAndIndex("dy", 0); \
const int64_t num_classes = dy_shape->At(dy_shape->NumAxes() - 1); \
const int64_t num_instances = dy_shape->Count(0, dy_shape->NumAxes() - 1); \
return SoftmaxKernelUtil<DeviceType::kGPU, dtype>::GetComputeProbTempStorageSizeInBytes( \
num_instances, num_classes); \
});
REGISTER_SOFTMAX_GRAD_KERNEL(float16)
REGISTER_SOFTMAX_GRAD_KERNEL(float)
REGISTER_SOFTMAX_GRAD_KERNEL(double)
#undef REGISTER_SOFTMAX_GRAD_KERNEL
} // namespace
} // namespace oneflow
|
422281fbe68c0626a969463f5734cc7d144ea9d5.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/user/kernels/softmax_kernel_util.h"
#include <cub/cub.cuh>
namespace oneflow {
namespace {
constexpr int64_t kSoftmaxGpuBlockSize = 256;
template<typename T>
struct SoftmaxUtil {
using ComputeType = T;
__device__ static ComputeType ToComputeType(T v) { return v; }
__device__ static T FromComputeType(ComputeType v) { return v; }
};
template<>
struct SoftmaxUtil<half> {
using ComputeType = float;
__device__ static ComputeType ToComputeType(half v) { return __half2float(v); }
__device__ static half FromComputeType(ComputeType v) { return __float2half(v); }
};
__device__ double Exp(double x) { return exp(x); }
__device__ float Exp(float x) { return expf(x); }
template<typename T>
int GetForwardDynamicSharedMemorySize(const int num_classes) {
return num_classes * sizeof(typename SoftmaxUtil<T>::ComputeType);
}
template<typename T>
int GetBackwardDynamicSharedMemorySize(const int num_classes) {
return 2 * num_classes * sizeof(typename SoftmaxUtil<T>::ComputeType);
}
int GetSoftmaxBlockSize() { return kSoftmaxGpuBlockSize; }
int GetSoftmaxNumBlocks(const int num_instances) {
return std::min(static_cast<int>(num_instances), kCudaMaxBlocksNum);
}
template<typename T>
int GetMinNumClasses() {
return 32;
}
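// SoftmaxGpuForwardImpl: rows (instances) are distributed over blocks in a grid-stride loop.
// Each row is staged in dynamic shared memory and processed in three passes:
//   1) load x and block-reduce the row maximum,
//   2) compute exp(x - max) in place and block-reduce the row sum,
//   3) divide by the sum and write the probabilities.
// Subtracting the row maximum first keeps exp() from overflowing (numerically stable softmax).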
template<typename T>
__global__ void SoftmaxGpuForwardImpl(const int num_instances, const int num_classes, const T* in,
T* prob) {
using SU = SoftmaxUtil<T>;
using ComputeType = typename SU::ComputeType;
extern __shared__ __align__(sizeof(ComputeType)) unsigned char fw_shared_buf[];
auto* compute_buf = reinterpret_cast<ComputeType*>(fw_shared_buf);
__shared__ ComputeType row_reduce_result;
typedef cub::BlockReduce<ComputeType, kSoftmaxGpuBlockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage;
const int tid = threadIdx.x;
for (int row = blockIdx.x; row < num_instances; row += gridDim.x) {
const int row_offset = row * num_classes;
const T* in_row = in + row_offset;
T* prob_row = prob + row_offset;
ComputeType thread_max = GetMinVal<ComputeType>();
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
const ComputeType x = SU::ToComputeType(in_row[col]);
compute_buf[col] = x;
thread_max = max(thread_max, x);
}
__syncthreads();
ComputeType block_max = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_max, cub::Max());
if (tid == 0) { row_reduce_result = block_max; }
__syncthreads();
const ComputeType row_max_t = row_reduce_result;
ComputeType thread_sum = 0;
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
const ComputeType exp_x = Exp(compute_buf[col] - row_max_t);
compute_buf[col] = exp_x;
thread_sum += exp_x;
}
__syncthreads();
ComputeType block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_sum, cub::Sum());
if (tid == 0) { row_reduce_result = block_sum; }
__syncthreads();
const ComputeType row_sum_t = row_reduce_result;
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
prob_row[col] = SU::FromComputeType(compute_buf[col] / row_sum_t);
}
}
}
template<typename T>
void SoftmaxForwardGpu(DeviceCtx* ctx, const int num_instances, const int num_classes, const T* in,
T* prob) {
SoftmaxGpuForwardImpl<<<GetSoftmaxNumBlocks(num_instances), GetSoftmaxBlockSize(),
GetForwardDynamicSharedMemorySize<T>(num_classes), ctx->cuda_stream()>>>(
num_instances, num_classes, in, prob);
}
template<>
void SoftmaxForwardGpu<float16>(DeviceCtx* ctx, const int num_instances, const int num_classes,
const float16* in, float16* prob) {
SoftmaxForwardGpu<half>(ctx, num_instances, num_classes, reinterpret_cast<const half*>(in),
reinterpret_cast<half*>(prob));
}
template<typename T>
int GetForwardFusedKernelMaxActiveBlocks(const int num_classes) {
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, SoftmaxGpuForwardImpl<T>, GetSoftmaxBlockSize(),
GetForwardDynamicSharedMemorySize<T>(num_classes)));
return max_active_blocks;
}
template<>
int GetForwardFusedKernelMaxActiveBlocks<float16>(const int num_classes) {
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, SoftmaxGpuForwardImpl<half>, GetSoftmaxBlockSize(),
GetForwardDynamicSharedMemorySize<half>(num_classes)));
return max_active_blocks;
}
template<typename T>
bool IsForwardFusedKernelSupported(const int num_classes) {
if (num_classes >= GetMinNumClasses<T>()
&& GetForwardFusedKernelMaxActiveBlocks<T>(num_classes) > 0) {
return true;
} else {
return false;
}
}
template<typename T>
__global__ void SoftmaxGpuBackwardImpl(const int num_instances, const int num_classes, const T* dy,
const T* prob, T* dx) {
using SU = SoftmaxUtil<T>;
using ComputeType = typename SU::ComputeType;
extern __shared__ __align__(sizeof(ComputeType)) unsigned char bw_shared_buf[];
auto* dy_buf = reinterpret_cast<ComputeType*>(bw_shared_buf);
auto* prob_buf =
reinterpret_cast<ComputeType*>(bw_shared_buf + num_classes * sizeof(ComputeType));
__shared__ ComputeType row_reduce_result;
typedef cub::BlockReduce<ComputeType, kSoftmaxGpuBlockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage;
const int tid = threadIdx.x;
for (int row = blockIdx.x; row < num_instances; row += gridDim.x) {
const int row_offset = row * num_classes;
const T* dy_row = dy + row_offset;
const T* prob_row = prob + row_offset;
T* dx_row = dx + row_offset;
ComputeType thread_sum = 0;
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
const ComputeType dy_col = SU::ToComputeType(dy_row[col]);
dy_buf[col] = dy_col;
const ComputeType prob_col = SU::ToComputeType(prob_row[col]);
prob_buf[col] = prob_col;
thread_sum += (dy_col * prob_col);
}
__syncthreads();
ComputeType block_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_sum, cub::Sum());
if (tid == 0) { row_reduce_result = block_sum; }
__syncthreads();
const ComputeType row_sum_t = row_reduce_result;
for (int col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) {
dx_row[col] = SU::FromComputeType((dy_buf[col] - row_sum_t) * prob_buf[col]);
}
}
}
template<typename T>
void SoftmaxBackwardGpu(DeviceCtx* ctx, const int num_instances, const int num_classes, const T* in,
const T* prob, T* dx) {
SoftmaxGpuBackwardImpl<<<GetSoftmaxNumBlocks(num_instances), GetSoftmaxBlockSize(),
GetBackwardDynamicSharedMemorySize<T>(num_classes),
ctx->cuda_stream()>>>(num_instances, num_classes, in, prob, dx);
}
template<>
void SoftmaxBackwardGpu<float16>(DeviceCtx* ctx, const int num_instances, const int num_classes,
const float16* in, const float16* prob, float16* dx) {
SoftmaxBackwardGpu<half>(ctx, num_instances, num_classes, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(prob), reinterpret_cast<half*>(dx));
}
template<typename T>
int GetBackwardFusedKernelMaxActiveBlocks(const int num_classes) {
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, SoftmaxGpuBackwardImpl<T>, GetSoftmaxBlockSize(),
GetBackwardDynamicSharedMemorySize<T>(num_classes)));
return max_active_blocks;
}
template<>
int GetBackwardFusedKernelMaxActiveBlocks<float16>(const int num_classes) {
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, SoftmaxGpuBackwardImpl<half>, GetSoftmaxBlockSize(),
GetBackwardDynamicSharedMemorySize<half>(num_classes)));
return max_active_blocks;
}
template<typename T>
bool IsBackwardFusedKernelSupported(const int num_classes) {
if (num_classes >= GetMinNumClasses<T>()
&& GetBackwardFusedKernelMaxActiveBlocks<T>(num_classes) > 0) {
return true;
} else {
return false;
}
}
template<typename T>
class SoftmaxKernel final : public user_op::OpKernel {
public:
SoftmaxKernel() = default;
~SoftmaxKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const ShapeView& in_shape = in->shape();
const int64_t num_classes = in_shape.At(in_shape.NumAxes() - 1);
const int64_t num_instances = in_shape.Count(0, in_shape.NumAxes() - 1);
if (IsForwardFusedKernelSupported<T>(num_classes)) {
SoftmaxForwardGpu<T>(ctx->device_ctx(), num_instances, num_classes, in->dptr<T>(),
out->mut_dptr<T>());
} else {
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
SoftmaxKernelUtil<DeviceType::kGPU, T>::ComputeProb(
ctx->device_ctx(), num_instances, num_classes, in->dptr<T>(), out->mut_dptr<T>(),
tmp_buffer->mut_dptr(), tmp_buffer->shape().elem_cnt());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_SOFTMAX_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("softmax") \
.SetCreateFn<SoftmaxKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
const int64_t num_classes = in_shape->At(in_shape->NumAxes() - 1); \
const int64_t num_instances = in_shape->Count(0, in_shape->NumAxes() - 1); \
return SoftmaxKernelUtil<DeviceType::kGPU, dtype>::GetComputeProbTempStorageSizeInBytes( \
num_instances, num_classes); \
});
REGISTER_SOFTMAX_GPU_KERNEL(float16)
REGISTER_SOFTMAX_GPU_KERNEL(float)
REGISTER_SOFTMAX_GPU_KERNEL(double)
#undef REGISTER_SOFTMAX_GPU_KERNEL
template<typename T>
class SoftmaxGradKernel final : public user_op::OpKernel {
public:
SoftmaxGradKernel() = default;
~SoftmaxGradKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const int64_t num_classes = y->shape().At(y->shape().NumAxes() - 1);
const int64_t num_instances = y->shape().elem_cnt() / num_classes;
if (IsBackwardFusedKernelSupported<T>(num_classes)) {
SoftmaxBackwardGpu<T>(ctx->device_ctx(), num_instances, num_classes, dy->dptr<T>(),
y->dptr<T>(), dx->mut_dptr<T>());
} else {
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
SoftmaxKernelUtil<DeviceType::kGPU, T>::ComputeDiff(
ctx->device_ctx(), num_instances, num_classes, dy->dptr<T>(), y->dptr<T>(),
dx->mut_dptr<T>(), tmp_buffer->mut_dptr(), tmp_buffer->shape().elem_cnt());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_SOFTMAX_GRAD_KERNEL(dtype) \
REGISTER_USER_KERNEL("softmax_grad") \
.SetCreateFn<SoftmaxGradKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* dy_shape = ctx->Shape4ArgNameAndIndex("dy", 0); \
const int64_t num_classes = dy_shape->At(dy_shape->NumAxes() - 1); \
const int64_t num_instances = dy_shape->Count(0, dy_shape->NumAxes() - 1); \
return SoftmaxKernelUtil<DeviceType::kGPU, dtype>::GetComputeProbTempStorageSizeInBytes( \
num_instances, num_classes); \
});
REGISTER_SOFTMAX_GRAD_KERNEL(float16)
REGISTER_SOFTMAX_GRAD_KERNEL(float)
REGISTER_SOFTMAX_GRAD_KERNEL(double)
#undef REGISTER_SOFTMAX_GRAD_KERNEL
} // namespace
} // namespace oneflow
|
3b06c636494fa0c87138806c7ba376adca4854c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#define BYDIMF 2
#define CDIM 5
#define BYDIMB 5
#if __CUDA_ARCH__ >= 300
/*
* Positive kernel for word2vec. This handles the positively-labeled word pairs with
* one context word and the current word.
*/
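// Layout (as implemented below): each block owns a slice of columns and slides a window of
// 2*SKIP+1 word ids across it. The center word's A column is kept in registers (aa) and the
// context words' B columns in bb. Warp shuffles plus the shared CC buffer reduce the per-pair
// dot products, the sigmoid turns each score into the positive-label residual 1 - sigmoid(a.b),
// and the updates (scaled by lrate and the per-word vexp factors) are applied with atomicAdd:
// A immediately, B accumulated in dbb and flushed when a word slides out of the window.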
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
const int nwindow = 2*SKIP+1;
int iwords[nwindow];
float aa[NREPS];
float daa[NREPS];
float bb[NREPS][nwindow];
float dbb[NREPS][nwindow];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, indx, icol, dxy, lb, ub;
float prod, v, ascale, bscale;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
bool good;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
float inr = 1.0f / nrows;
#pragma unroll
for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers
if (istart + i - SKIP - 1 >= 0) {
iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word address
} else {
iwords[i] = -1;
}
good = (iwords[i] >= 0);
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get the B vector for this word
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][i] = B[indx + iwords[i]];
} else {
bb[j][i] = 0;
}
dbb[j][i] = 0;
}
}
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < nwindow-1; i++) { // slide iwords down
iwords[i] = iwords[i+1];
#pragma unroll
for (j = 0; j < NREPS; j++) {
bb[j][i] = bb[j][i+1]; // slide data down
dbb[j][i] = dbb[j][i+1]; // slide deriv down
}
}
good = (icol + SKIP < ncols);
if (good) {
iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word address
} else {
iwords[nwindow - 1] = -1;
}
good = good && iwords[nwindow-1] >= 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get a new B column
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]];
} else {
bb[j][nwindow - 1] = 0;
}
dbb[j][nwindow-1] = 0;
if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column
aa[j] = A[indx + iwords[SKIP]];
} else {
aa[j] = 0;
}
}
lb = LB[icol];
ub = UB[icol];
__syncthreads();
if (iwords[SKIP] >= 0) {
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols
prod = 0;
if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
v = __shfl_down(prod, k); // Reduce within warp
prod += v;
}
if (threadIdx.x == 0) {
CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce across warps
for (k = tid; k <= ub - lb; k += dxy) {
CC[k] += CC[k + i * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i <= ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = 1.0f - v; // All pairs have label 1
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
daa[j] = 0;
}
ascale = pow(max(0, iwords[SKIP])*inr + 1.0f, vexp);
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols
if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP && iwords[i] >= 0) {
bscale = pow(max(0, iwords[i])*inr + 1.0f, vexp);
v = lrate * CC[i - SKIP - lb];
#pragma unroll
for (j = 0; j < NREPS; j++) {
daa[j] += ascale * v * bb[j][i]; // Update A's derivative
dbb[j][i] += bscale * v * aa[j]; // Update B's derivative
}
}
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) { // Save the A column
atomicAdd(&A[tid + j * dxy + iwords[SKIP]], daa[j]);
}
}
if (iwords[0] >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) { // Save the B column
atomicAdd(&B[tid + j * dxy + iwords[0]], dbb[j][0]);
}
}
}
__syncthreads();
}
}
#pragma unroll
for (i = 1; i < nwindow; i++) { // Clear out the derivative queue
if (iwords[i] >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Save the B column
if (tid + j * dxy < nrows) {
atomicAdd(&B[tid + j * dxy + iwords[i]], dbb[j][i]);
}
}
}
}
}
/*
* Evaluation kernel for word2vec. This scores the positively-labeled word pairs
* (one context word and the current word) without updating the model.
*/
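// Unlike __word2vecPos, this kernel only scores: it forms the same windowed dot products,
// maps them through the sigmoid, and accumulates sum(log sigmoid(a.b)) per block, which is
// atomically added into Retval[0]. Pairs with the center word itself or with a missing word
// get a fixed score of 20, so they saturate the sigmoid and contribute log(1) = 0 to the loss.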
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {
const int nwindow = 2*SKIP+1;
int iwords[nwindow];
float aa[NREPS];
float bb[NREPS][nwindow];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, indx, icol, dxy, lb, ub;
float prod, v;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
bool good;
double sum = 0;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
#pragma unroll
for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers
if (istart + i - SKIP - 1 >= 0) {
iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word
} else {
iwords[i] = -1;
}
good = (iwords[i] >= 0);
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get the B vector for this word
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][i] = B[indx + iwords[i]];
} else {
bb[j][i] = 0;
}
}
}
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < nwindow-1; i++) { // slide iwords down
iwords[i] = iwords[i+1];
#pragma unroll
for (j = 0; j < NREPS; j++) {
bb[j][i] = bb[j][i+1]; // slide data down
}
}
good = (icol + SKIP < ncols);
if (good) {
iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word
} else {
iwords[nwindow - 1] = -1;
}
good = good && iwords[nwindow-1] >= 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get a new B column
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]];
} else {
bb[j][nwindow - 1] = 0;
}
if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column
aa[j] = A[indx + iwords[SKIP]];
} else {
aa[j] = 0;
}
}
lb = LB[icol];
ub = UB[icol];
__syncthreads();
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols
if (i >= SKIP + lb && i <= SKIP + ub) {
if (i == SKIP || iwords[SKIP] < 0 || iwords[i] < 0) { // Give this word a large score (gives zero contribution to loss)
prod = 20.0f;
} else {
prod = 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
v = __shfl_down(prod, k); // Reduce within warp
prod += v;
}
}
if (threadIdx.x == 0) {
CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce across warps
for (k = tid; k <= ub - lb; k += dxy) {
CC[k] += CC[k + i * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i <= ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(v, 1.0e-20f)); // Compute the loss
}
__syncthreads();
for (i = 1; i <= ub - lb; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i <= ub - lb) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
if (tid == 0) {
atomicAdd(&Retval[0], (float)sum);
}
}
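/*
 * Illustrative helper (not part of the original source, and nothing below calls it):
 * the score __word2vecEvalPos accumulates for each in-window (center, context) pair is
 * log(sigmoid(a.b)), with the same clamping the device code uses -- saturate beyond
 * |x| = 16 and floor the probability at 1e-20 before the log. (__word2vecEvalNeg later
 * uses log(1 - sigmoid(a.b)) for its label-0 pairs.) Assumes the C math functions are in scope.
 */
static inline float w2v_log_sigmoid_ref(float x) {
  float v;
  if (x > 16.0f) {
    v = 1.0f;
  } else if (x < -16.0f) {
    v = 0.0f;
  } else {
    v = expf(x);
    v = v / (1.0f + v);
  }
  return logf(fmaxf(v, 1.0e-20f));                             // per-pair log-likelihood contribution
}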
template<int NSKIP, int BYDIM>
__global__ void __word2vecPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
__shared__ float CC[NSKIP*2*BYDIM];
float aa;
int ib[NSKIP*2];
float prods[NSKIP*2];
float bscale[NSKIP*2];
int ia, iword, lb, ub;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, jcol;
float bb, db, dv, v, ascale, tmp;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
ia = nrows * W[icol];
if (ia >= 0) { // Load lb and ub values
lb = LB[icol];
ub = UB[icol];
jcol = threadIdx.x - NSKIP;
iword = -1;
if (jcol >= lb && jcol <= ub) { // Load words in the window
iword = W[icol + jcol];
}
#pragma unroll
for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods
ib[i] = nrows * __shfl(iword, i);
ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1);
prods[i] = 0;
prods[i+NSKIP] = 0;
}
for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP*2; j++) {
if (ib[j] >= 0) {
bb = B[i + ib[j]];
prods[j] += aa * bb;
}
}
}
#pragma unroll
for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp
#pragma unroll
for (k = 1; k < 32; k = k+k) {
tmp = __shfl_down(prods[j], k);
prods[j] += tmp;
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (j = 0; j < 2*NSKIP; j++) {
CC[j + NSKIP * 2 * threadIdx.y] = prods[j];
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps
__syncthreads();
for (j = tid; j < NSKIP * 2; j += dxy) {
CC[j] += CC[j + i * NSKIP * 2];
}
}
__syncthreads();
for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = lrate * (1 - v); // All these pairs have label 1
}
__syncthreads(); // Now do scaled gradients
ascale = pow(max(0, ia)*inr + 1.0f, vexp); // Simulated ADAGRAD on A
for (j = 0; j < NSKIP * 2; j++) { // Load B data
if (ib[j] >= 0) {
bscale[j] = pow(max(0, ib[j])*inr + 1.0f, vexp); // Simulated ADAGRAD on B
} else {
bscale[j] = 0;
}
prods[j] = CC[j];
}
__syncthreads();
dv = 0;
for (i = tid; i < nrows; i += dxy) { // Update vecs with derivatives
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP * 2; j++) { // Load B data
if (ib[j] >= 0) {
bb = B[i + ib[j]];
dv += ascale * prods[j] * bb;
db = bscale[j] * prods[j] * aa;
atomicAdd(&B[i + ib[j]], db); // Update B
}
}
atomicAdd(&A[i + ia], dv); // Update A
}
__syncthreads();
}
}
}
template<int NSKIP, int BYDIM>
__global__ void __word2vecEvalPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *retval) {
__shared__ float CC[NSKIP*2*BYDIM];
float aa;
float prods[NSKIP*2];
int ia, iword, lb, ub;
int ib[NSKIP*2];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, jcol;
float bb, v, tmp, sum;
sum = 0;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
ia = nrows * W[icol];
if (ia >= 0) { // Load lb and ub values
lb = LB[icol];
ub = UB[icol];
jcol = threadIdx.x - NSKIP;
iword = -1;
if (jcol >= lb && jcol <= ub) { // Load words in the window
iword = W[icol + jcol];
}
#pragma unroll
for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods
ib[i] = nrows * __shfl(iword, i);
ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1);
prods[i] = 0;
prods[i+NSKIP] = 0;
}
for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP*2; j++) {
if (ib[j] >= 0) {
bb = B[i + ib[j]];
prods[j] += aa * bb;
}
}
}
#pragma unroll
for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp
#pragma unroll
for (k = 1; k < 32; k = k+k) {
tmp = __shfl_down(prods[j], k);
prods[j] += tmp;
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (j = 0; j < 2*NSKIP; j++) {
CC[j + NSKIP * 2 * threadIdx.y] = prods[j];
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps
__syncthreads();
for (j = tid; j < NSKIP * 2; j += dxy) {
CC[j] += CC[j + i * NSKIP * 2];
}
}
__syncthreads();
for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(v, 1.0e-20f)); // All these pairs have label 1
}
__syncthreads(); // Now sum likelihood over window
for (i = 1; i < 2 * NSKIP; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i < 2 * NSKIP) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
}
if (tid == 0) {
atomicAdd(&retval[0], (float)sum);
}
}
/*
 * Combined forward-backward word2vec kernel. This handles the negatively-sampled word pairs (label 0).
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float aa[NWA];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
float bscale[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol;
float dv, v, ascale;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + ib[j]];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
v = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
aa[j] = A[i + ia[j]];
}
#pragma unroll
for (k = 0; k < NWB; k++) { // Load B data
bb[k] = B[i + ib[k]];
bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp);
prods[0][k] = 0;
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the products
ascale = pow(max(0, ia[j])*inr + 1.0f, vexp);
dv = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
v = CC[j + k * NWA];
dv += ascale * v * bb[k];
prods[0][k] += bscale[k] * v * aa[j];
}
atomicAdd(&A[i + ia[j]], dv); // Update A
}
#pragma unroll
for (k = 0; k < NWB; k++) {
atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B
}
}
__syncthreads();
}
}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNegFilt(int nrows, int ncols, int nwords, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float aa[NWA];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
float bscale[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, tmpi;
float dv, v, ascale;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
tmpi = WA[i + icol * NWA]; // Fill the A word matrix
if (tmpi < nwords) {
tmpi = nrows * tmpi;
} else {
tmpi = -1;
}
ia[i] = tmpi;
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
tmpi = WB[i + icol * NWB]; // Fill the B word matrix
if (tmpi < nwords) {
tmpi = nrows * tmpi;
} else {
tmpi = -1;
}
ib[i] = tmpi;
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
if (ib[j] >= 0) {
bb[j] = B[i + ib[j]];
} else {
bb[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
if (ia[j] >= 0) {
v = A[i + ia[j]];
} else {
v = 0;
}
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
if (ia[j] >= 0) {
aa[j] = A[i + ia[j]];
} else {
aa[j] = 0;
}
}
#pragma unroll
for (k = 0; k < NWB; k++) { // Load B data
if (ib[k] >= 0) {
bb[k] = B[i + ib[k]];
} else {
bb[k] = 0;
}
bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp);
prods[0][k] = 0;
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the products
ascale = pow(max(0, ia[j])*inr + 1.0f, vexp);
dv = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
v = CC[j + k * NWA];
dv += ascale * v * bb[k];
prods[0][k] += bscale[k] * v * aa[j];
}
if (ia[j] >= 0) {
atomicAdd(&A[i + ia[j]], dv); // Update A
}
}
#pragma unroll
for (k = 0; k < NWB; k++) {
if (ib[k] >= 0) {
atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B
}
}
}
__syncthreads();
}
}
/*
 * Evaluation kernel for the negatively-sampled word pairs (label 0): accumulates their log-likelihood into Retval.
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol;
float v;
double sum = 0;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + ib[j]];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
v = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(1.0f - v, 1.0e-20f)); // All these pairs have label 0
}
for (i = 1; i < NWA*NWB; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i < NWA*NWB) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
if (tid == 0) {
atomicAdd(&Retval[0], (float)sum);
}
}
/*
 * Convolutional kernel for word2vec. This handles the positively-labeled word pairs with
* one context word and the current word.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecPos_exp(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) {
const int nwindow = 2*SKIP+1;
float aa[NREPS];
float da[NREPS];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, icol, dxy, lb, ub, iword, cword;
float bb, db, prod, v;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
iword = nrows * W[icol]; // Get the current word
__syncthreads();
lb = LB[icol];
ub = UB[icol];
if (iword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get A
aa[j] = A[tid + j * dxy + iword];
} else {
aa[j] = 0;
}
}
for (i = lb; i <= ub; i++) { // Iterate across the window for A cols
__syncthreads();
cword = nrows * W[icol + i]; // Get the current word
prod = 0;
if (cword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get B col
bb = B[tid + j * dxy + cword];
prod += aa[j] * bb; // Compute the product between current A, B cols
}
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
prod += __shfl_down(prod, k); // Reduce within warp
}
}
if (threadIdx.x == 0) {
CC[i - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
__syncthreads();
for (j = 1; j < blockDim.y; j++) { // Reduce across warps
for (i = tid; i < ub - lb; i += dxy) {
CC[i] += CC[i + j * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i < ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = lrate * (1.0f - v); // All pairs have label 1
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
da[j] = 0;
}
for (i = lb; i <= ub; i++) { // Iterate across the window for A cols
cword = nrows * W[icol + i]; // Get the context word
v = CC[i - lb];
if (cword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get B col
bb = B[tid + j * dxy + cword];
da[j] += v * bb;
db = v * aa[j];
atomicAdd(&B[tid + j * dxy + cword], db);
}
}
}
}
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) {
atomicAdd(&A[tid + j * dxy + iword], da[j]);
}
}
}
}
}
/*
 * Combined forward-backward word2vec kernel (older variant). This handles the negatively-sampled word pairs (label 0).
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg_old(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float dd[MAXD];
float prods[NWA][NWB];
float aa, v, sum;
int ia[NWA];
int ib[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int i, j, k, icol;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
if (ib[j] >= 0) {
dd[j] = B[i + ib[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the inner products of these elements
if (ia[j] >= 0) {
aa = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += aa * dd[k];
}
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWB; j++) { // Load B data
if (ib[j] >= 0) {
dd[j] = B[i + ib[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the product
if (ia[j] >= 0) {
sum = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
float xx = CC[j + k * NWA];
sum += xx * dd[k];
}
atomicAdd(&A[i + ia[j]], sum);
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
if (ia[j] >= 0) {
dd[j] = A[i + ia[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWB; j++) { // Now do the product
if (ib[j] >= 0) {
sum = 0;
#pragma unroll
for (k = 0; k < NWA; k++) {
float xx = CC[k + j * NWA];
sum += xx * dd[k];
}
atomicAdd(&B[i + ib[j]], sum);
}
}
}
__syncthreads();
}
}
/*
*
* Simple forward kernel for word2vec. Computes inner products of columns from A with columns from B.
* The column indices are specified by two "word" matrices. The inner products are computed as an outer product
* of the word matrices.
*
* NWA is the number of words per column in WA
* NWB is the number of words per column in WB
*
* Columns of the output matrix C are <window> = NWA*NWB long, and contain inner products with corresponding columns of B.
*
*/
template<int NWA, int NWB, int BDIM>
__global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BDIM];
float aa;
float bb[NWB];
float prods[NWA][NWB];
int wa[NWA];
int wb[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int i, j, k, icol;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
      wa[i] = WA[i + icol * NWA];                               // Fill the A word matrix (raw word id; scaled by nrows at the use site)
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
wb[i] = WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + wb[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Computes the products of these elements
aa = A[i + wa[j] * nrows];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += aa * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
#pragma unroll
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWAB; i += dxy) { // Save to main memory
C[i + icol * NWAB] = CC[i];
//atomicAdd(&C[i + icol * NWAB], CC[i]);
}
__syncthreads();
}
}
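/*
 * Illustrative only (not part of the original source): a serial host reference of what
 * __word2vecFwd computes, assuming column-major A and B (nrows x nwords) and an output C
 * with nwa*nwb rows, one column per input column, laid out with the WB index fastest
 * (matching the CC layout the kernel writes). Hypothetical helper, never called here.
 */
static inline void word2vecFwdHostRef(int nrows, int ncols, int nwa, int nwb,
                                      const int *WA, const int *WB,
                                      const float *A, const float *B, float *C) {
  for (int icol = 0; icol < ncols; icol++) {                   // one output column per input column
    for (int i = 0; i < nwa; i++) {                            // words from WA
      for (int j = 0; j < nwb; j++) {                          // words from WB
        float sum = 0;
        for (int r = 0; r < nrows; r++) {                      // inner product of the two embedding columns
          sum += A[r + WA[i + icol * nwa] * nrows] * B[r + WB[j + icol * nwb] * nrows];
        }
        C[j + nwb * i + icol * nwa * nwb] = sum;
      }
    }
  }
}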
/*
*
* Simple backward kernel for word2vec.
* Computes the gradient for A given B or vice-versa, and does an SGD update.
*
* NWA is the number of words per column in WA
* NWB is the number of words per column in WB
*
*/
template<int NWA, int NWB, int MAXDIM>
__global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {
const int NWAB = NWA * NWB;
float dd[MAXDIM];
int wa[NWA];
int wb[NWB];
__shared__ float cc[NWA*NWB];
int tid = threadIdx.x;
int fid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int icol, i, j, k;
float sum;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // iterate in columns
#pragma unroll
for (j = 0; j < NWA; j++) {
wa[j] = WA[j + icol * NWA]; // Load the A word matrix
}
__syncthreads();
#pragma unroll
for (j = 0; j < NWB; j++) {
wb[j] = WB[j + icol * NWB]; // Load the B word matrix
}
for (i = fid; i < NWAB; i += dxy) {
cc[i] = C[i + icol * NWAB];
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWB; j++) { // Load the data
dd[j] = B[i + wb[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the product
sum = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
float xx = cc[j + k * NWA];
sum += xx * dd[k];
}
atomicAdd(&A[i + wa[j] * nrows], sum * lrate);
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Load the data
dd[j] = A[i + wa[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWB; j++) { // Now do the product
sum = 0;
#pragma unroll
for (k = 0; k < NWA; k++) {
float xx = cc[k + j * NWA];
sum += xx * dd[k];
}
atomicAdd(&B[i + wb[j] * nrows], sum * lrate);
}
}
}
}
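/*
 * Illustrative only (not part of the original source): a serial host sketch of the SGD
 * step __word2vecBwd applies for a single column, given the nwa word ids wa, the nwb word
 * ids wb, and the nwa*nwb block Ccol of per-pair coefficients supplied by the caller
 * (WA index fastest, which is how the kernel indexes cc). A and B are column-major,
 * nrows x nwords. Hypothetical helper, never called here.
 */
static inline void word2vecBwdHostRef(int nrows, int nwa, int nwb,
                                      const int *wa, const int *wb,
                                      float *A, float *B, const float *Ccol, float lrate) {
  for (int r = 0; r < nrows; r++) {
    for (int ja = 0; ja < nwa; ja++) {                         // update the A columns
      float sum = 0;
      for (int jb = 0; jb < nwb; jb++) {
        sum += Ccol[ja + jb * nwa] * B[r + wb[jb] * nrows];
      }
      A[r + wa[ja] * nrows] += lrate * sum;
    }
    for (int jb = 0; jb < nwb; jb++) {                         // update the B columns
      float sum = 0;
      for (int ja = 0; ja < nwa; ja++) {
        sum += Ccol[ja + jb * nwa] * A[r + wa[ja] * nrows];    // reads A after its update, as the kernel does
      }
      B[r + wb[jb] * nrows] += lrate * sum;
    }
  }
}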
#else
template<int SKIP, int BYDIM, int NREPS>
__global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNegFilt(int nrows, int ncols, int nwords, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {}
template<int SKIP, int BYDIM, int NREPS>
__global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {}
template<int NWA, int NWB, int BDIM>
__global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {}
template<int NWA, int NWB, int MAXDIM>
__global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {}
#endif
int word2vecPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, CDIM, 1);
int nblocks = min(64, ncols);
switch(skip) {
case 5 :hipLaunchKernelGGL(( __word2vecPos<5, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
case 3 :hipLaunchKernelGGL(( __word2vecPos<3, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
case 2 :hipLaunchKernelGGL(( __word2vecPos<2, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
default : printf("word2vecPos unsupport size %d\n", skip); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
int word2vecNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001:hipLaunchKernelGGL(( __word2vecNeg<5,1,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 50005:hipLaunchKernelGGL(( __word2vecNeg<5,5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 100005:hipLaunchKernelGGL(( __word2vecNeg<10,5,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 50010:hipLaunchKernelGGL(( __word2vecNeg<5,10,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, lrate, vexp); break;
// case 150010: __word2vecNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate); break;
default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
int word2vecNegFilt(int nrows, int ncols, int nwords, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001:hipLaunchKernelGGL(( __word2vecNegFilt<5,1,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
case 50005:hipLaunchKernelGGL(( __word2vecNegFilt<5,5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
case 100005:hipLaunchKernelGGL(( __word2vecNegFilt<10,5,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
case 50010:hipLaunchKernelGGL(( __word2vecNegFilt<5,10,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
// case 150010: __word2vecNegFilt<15,10,15><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
int word2vecEvalPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {
dim3 threads(32, CDIM, 1);
int nblocks = min(64, ncols);
switch(skip) {
case 5 :hipLaunchKernelGGL(( __word2vecEvalPos<5, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, Retval); break;
case 3 :hipLaunchKernelGGL(( __word2vecEvalPos<3, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, Retval); break;
case 2 :hipLaunchKernelGGL(( __word2vecEvalPos<2, CDIM, 10/CDIM>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, W, LB, UB, A, B, Retval); break;
default : printf("word2vecEvalPos unsupport size %d\n", skip); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
int word2vecEvalNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *Retval) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001:hipLaunchKernelGGL(( __word2vecEvalNeg<5,1,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break;
case 50005:hipLaunchKernelGGL(( __word2vecEvalNeg<5,5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break;
case 100005:hipLaunchKernelGGL(( __word2vecEvalNeg<10,5,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break;
case 50010:hipLaunchKernelGGL(( __word2vecEvalNeg<5,10,10,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, Retval); break;
// case 150010: __word2vecEvalNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
default : printf("word2vecEvalNeg unsupport size combination %d %d\n", nwa, nwb); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
int word2vecFwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(4096, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001:hipLaunchKernelGGL(( __word2vecFwd<5,1,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C); break;
case 50005:hipLaunchKernelGGL(( __word2vecFwd<5,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C); break;
case 100005:hipLaunchKernelGGL(( __word2vecFwd<10,5,BYDIMF>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C); break;
default : printf("word2vecFwd unsupport size combination %d %d\n", nwa, nwb); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
int word2vecBwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C, float lrate) {
dim3 threads(32*BYDIMB, 1, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001:hipLaunchKernelGGL(( __word2vecBwd<5,1,5>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C, lrate); break;
case 50005:hipLaunchKernelGGL(( __word2vecBwd<5,5,5>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C, lrate); break;
case 100005:hipLaunchKernelGGL(( __word2vecBwd<10,5,10>), dim3(nblocks),dim3(threads), 0, 0, nrows, ncols, WA, WB, A, B, C, lrate); break;
default : printf("word2vecBwd unsupport size combination %d %d\n", nwa, nwb); return 1;
}
hipDeviceSynchronize();
int err = hipGetLastError();
return err;
}
|
3b06c636494fa0c87138806c7ba376adca4854c9.cu
|
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#define BYDIMF 2
#define CDIM 5
#define BYDIMB 5
#if __CUDA_ARCH__ >= 300
/*
 * Positive kernel for word2vec. This handles the positively-labeled word pairs with
* one context word and the current word.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
const int nwindow = 2*SKIP+1;
int iwords[nwindow];
float aa[NREPS];
float daa[NREPS];
float bb[NREPS][nwindow];
float dbb[NREPS][nwindow];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, indx, icol, dxy, lb, ub;
float prod, v, ascale, bscale;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
bool good;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
float inr = 1.0f / nrows;
#pragma unroll
for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers
if (istart + i - SKIP - 1 >= 0) {
iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word address
} else {
iwords[i] = -1;
}
good = (iwords[i] >= 0);
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get the B vector for this word
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][i] = B[indx + iwords[i]];
} else {
bb[j][i] = 0;
}
dbb[j][i] = 0;
}
}
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < nwindow-1; i++) { // slide iwords down
iwords[i] = iwords[i+1];
#pragma unroll
for (j = 0; j < NREPS; j++) {
bb[j][i] = bb[j][i+1]; // slide data down
dbb[j][i] = dbb[j][i+1]; // slide deriv down
}
}
good = (icol + SKIP < ncols);
if (good) {
iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word address
} else {
iwords[nwindow - 1] = -1;
}
good = good && iwords[nwindow-1] >= 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get a new B column
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]];
} else {
bb[j][nwindow - 1] = 0;
}
dbb[j][nwindow-1] = 0;
if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column
aa[j] = A[indx + iwords[SKIP]];
} else {
aa[j] = 0;
}
}
lb = LB[icol];
ub = UB[icol];
__syncthreads();
if (iwords[SKIP] >= 0) {
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols
prod = 0;
if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
v = __shfl_down(prod, k); // Reduce within warp
prod += v;
}
if (threadIdx.x == 0) {
CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce across warps
for (k = tid; k <= ub - lb; k += dxy) {
CC[k] += CC[k + i * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i <= ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = 1.0f - v; // All pairs have label 1
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
daa[j] = 0;
}
ascale = pow(max(0, iwords[SKIP])*inr + 1.0f, vexp);
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for A cols
if (i >= SKIP + lb && i <= SKIP + ub && i != SKIP && iwords[i] >= 0) {
bscale = pow(max(0, iwords[i])*inr + 1.0f, vexp);
v = lrate * CC[i - SKIP - lb];
#pragma unroll
for (j = 0; j < NREPS; j++) {
daa[j] += ascale * v * bb[j][i]; // Update A's derivative
dbb[j][i] += bscale * v * aa[j]; // Update B's derivative
}
}
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) { // Save the A column
atomicAdd(&A[tid + j * dxy + iwords[SKIP]], daa[j]);
}
}
if (iwords[0] >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) { // Save the B column
atomicAdd(&B[tid + j * dxy + iwords[0]], dbb[j][0]);
}
}
}
__syncthreads();
}
}
#pragma unroll
for (i = 1; i < nwindow; i++) { // Clear out the derivative queue
if (iwords[i] >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Save the B column
if (tid + j * dxy < nrows) {
atomicAdd(&B[tid + j * dxy + iwords[i]], dbb[j][i]);
}
}
}
}
}
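/*
 * Illustrative only (not part of the original source): a minimal host sketch of the update
 * the positive kernel applies for a single (center, context) pair, assuming column-major
 * A and B and the pow(word_id + 1, vexp) "simulated ADAGRAD" scaling used above
 * (iwords = nrows * word_id and inr = 1/nrows, so max(0, iwords)*inr + 1 = word_id + 1).
 * Hypothetical helper, never called here.
 */
static inline void w2v_pos_pair_update_ref(int nrows, int wcenter, int wctx,
                                           float *A, float *B, float lrate, float vexp) {
  float prod = 0;
  for (int r = 0; r < nrows; r++) {                            // dot product of the two embedding columns
    prod += A[r + wcenter * nrows] * B[r + wctx * nrows];
  }
  float v;
  if (prod > 16.0f) {                                          // clamped sigmoid, as in the kernel
    v = 1.0f;
  } else if (prod < -16.0f) {
    v = 0.0f;
  } else {
    v = expf(prod);
    v = v / (1.0f + v);
  }
  float c = lrate * (1.0f - v);                                // label-1 gradient coefficient
  float ascale = powf(wcenter + 1.0f, vexp);                   // simulated-ADAGRAD scales
  float bscale = powf(wctx + 1.0f, vexp);
  for (int r = 0; r < nrows; r++) {
    float a = A[r + wcenter * nrows];
    float b = B[r + wctx * nrows];
    A[r + wcenter * nrows] += ascale * c * b;
    B[r + wctx * nrows]    += bscale * c * a;                  // uses the pre-update a, as the kernel does
  }
}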
/*
 * Convolutional kernel for word2vec. This handles the positively-labeled word pairs with
* one context word and the current word.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {
const int nwindow = 2*SKIP+1;
int iwords[nwindow];
float aa[NREPS];
float bb[NREPS][nwindow];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, indx, icol, dxy, lb, ub;
float prod, v;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
bool good;
double sum = 0;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
#pragma unroll
for (i = 0; i < nwindow; i++) { // Prefill the word and aa window buffers
if (istart + i - SKIP - 1 >= 0) {
iwords[i] = nrows * W[istart + i - SKIP - 1]; // Get a new word
} else {
iwords[i] = -1;
}
good = (iwords[i] >= 0);
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get the B vector for this word
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][i] = B[indx + iwords[i]];
} else {
bb[j][i] = 0;
}
}
}
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < nwindow-1; i++) { // slide iwords down
iwords[i] = iwords[i+1];
#pragma unroll
for (j = 0; j < NREPS; j++) {
bb[j][i] = bb[j][i+1]; // slide data down
}
}
good = (icol + SKIP < ncols);
if (good) {
iwords[nwindow - 1] = nrows * W[icol + SKIP]; // Get a new word
} else {
iwords[nwindow - 1] = -1;
}
good = good && iwords[nwindow-1] >= 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Get a new B column
indx = tid + j * dxy;
if (good && indx < nrows) {
bb[j][nwindow - 1] = B[indx + iwords[nwindow - 1]];
} else {
bb[j][nwindow - 1] = 0;
}
if (iwords[SKIP] >= 0 && indx < nrows) { // Get a new A column
aa[j] = A[indx + iwords[SKIP]];
} else {
aa[j] = 0;
}
}
lb = LB[icol];
ub = UB[icol];
__syncthreads();
#pragma unroll
for (i = 0; i < nwindow; i++) { // Iterate across the window for B cols
if (i >= SKIP + lb && i <= SKIP + ub) {
if (i == SKIP || iwords[SKIP] < 0 || iwords[i] < 0) { // Give this word a large score (gives zero contribution to loss)
prod = 20.0f;
} else {
prod = 0;
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
prod += bb[j][i] * aa[j]; // Compute the product between current A, B cols
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
v = __shfl_down(prod, k); // Reduce within warp
prod += v;
}
}
if (threadIdx.x == 0) {
CC[i - SKIP - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce across warps
for (k = tid; k <= ub - lb; k += dxy) {
CC[k] += CC[k + i * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i <= ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(v, 1.0e-20f)); // Compute the loss
}
__syncthreads();
for (i = 1; i <= ub - lb; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i <= ub - lb) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
if (tid == 0) {
atomicAdd(&Retval[0], (float)sum);
}
}
template<int NSKIP, int BYDIM>
__global__ void __word2vecPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
__shared__ float CC[NSKIP*2*BYDIM];
float aa;
int ib[NSKIP*2];
float prods[NSKIP*2];
float bscale[NSKIP*2];
int ia, iword, lb, ub;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, jcol;
float bb, db, dv, v, ascale, tmp;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
ia = nrows * W[icol];
if (ia >= 0) { // Load lb and ub values
lb = LB[icol];
ub = UB[icol];
jcol = threadIdx.x - NSKIP;
iword = -1;
if (jcol >= lb && jcol <= ub) { // Load words in the window
iword = W[icol + jcol];
}
#pragma unroll
for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods
ib[i] = nrows * __shfl(iword, i);
ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1);
prods[i] = 0;
prods[i+NSKIP] = 0;
}
for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP*2; j++) {
if (ib[j] >= 0) {
bb = B[i + ib[j]];
prods[j] += aa * bb;
}
}
}
#pragma unroll
for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp
#pragma unroll
for (k = 1; k < 32; k = k+k) {
tmp = __shfl_down(prods[j], k);
prods[j] += tmp;
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (j = 0; j < 2*NSKIP; j++) {
CC[j + NSKIP * 2 * threadIdx.y] = prods[j];
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps
__syncthreads();
for (j = tid; j < NSKIP * 2; j += dxy) {
CC[j] += CC[j + i * NSKIP * 2];
}
}
__syncthreads();
for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = lrate * (1 - v); // All these pairs have label 1
}
__syncthreads(); // Now do scaled gradients
ascale = pow(max(0, ia)*inr + 1.0f, vexp); // Simulated ADAGRAD on A
for (j = 0; j < NSKIP * 2; j++) { // Load B data
if (ib[j] >= 0) {
bscale[j] = pow(max(0, ib[j])*inr + 1.0f, vexp); // Simulated ADAGRAD on B
} else {
bscale[j] = 0;
}
prods[j] = CC[j];
}
__syncthreads();
dv = 0;
for (i = tid; i < nrows; i += dxy) { // Update vecs with derivatives
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP * 2; j++) { // Load B data
if (ib[j] >= 0) {
bb = B[i + ib[j]];
dv += ascale * prods[j] * bb;
db = bscale[j] * prods[j] * aa;
atomicAdd(&B[i + ib[j]], db); // Update B
}
}
atomicAdd(&A[i + ia], dv); // Update A
}
__syncthreads();
}
}
}
template<int NSKIP, int BYDIM>
__global__ void __word2vecEvalPosy(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *retval) {
__shared__ float CC[NSKIP*2*BYDIM];
float aa;
float prods[NSKIP*2];
int ia, iword, lb, ub;
int ib[NSKIP*2];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, jcol;
float bb, v, tmp, sum;
sum = 0;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
ia = nrows * W[icol];
if (ia >= 0) { // Load lb and ub values
lb = LB[icol];
ub = UB[icol];
jcol = threadIdx.x - NSKIP;
iword = -1;
if (jcol >= lb && jcol <= ub) { // Load words in the window
iword = W[icol + jcol];
}
#pragma unroll
for (i = 0; i < NSKIP; i++) { // Share window word ids across threads, clear prods
ib[i] = nrows * __shfl(iword, i);
ib[i+NSKIP] = nrows * __shfl(iword, i+NSKIP+1);
prods[i] = 0;
prods[i+NSKIP] = 0;
}
for (i = tid; i < nrows; i += dxy) { // Compute products between center and context words
aa = A[i + ia];
#pragma unroll
for (j = 0; j < NSKIP*2; j++) {
if (ib[j] >= 0) {
bb = B[i + ib[j]];
prods[j] += aa * bb;
}
}
}
#pragma unroll
for (j = 0; j < NSKIP*2; j++) { // Reduce prods within each warp
#pragma unroll
for (k = 1; k < 32; k = k+k) {
tmp = __shfl_down(prods[j], k);
prods[j] += tmp;
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (j = 0; j < 2*NSKIP; j++) {
CC[j + NSKIP * 2 * threadIdx.y] = prods[j];
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) { // Reduce the products across warps
__syncthreads();
for (j = tid; j < NSKIP * 2; j += dxy) {
CC[j] += CC[j + i * NSKIP * 2];
}
}
__syncthreads();
for (i = tid; i < NSKIP * 2; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(v, 1.0e-20f)); // All these pairs have label 1
}
__syncthreads(); // Now sum likelihood over window
for (i = 1; i < 2 * NSKIP; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i < 2 * NSKIP) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
}
if (tid == 0) {
atomicAdd(&retval[0], (float)sum);
}
}
/*
 * Combined forward-backward word2vec kernel. This handles the negatively-sampled word pairs (label 0).
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float aa[NWA];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
float bscale[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol;
float dv, v, ascale;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + ib[j]];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
v = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
aa[j] = A[i + ia[j]];
}
#pragma unroll
for (k = 0; k < NWB; k++) { // Load B data
bb[k] = B[i + ib[k]];
bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp);
prods[0][k] = 0;
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the products
ascale = pow(max(0, ia[j])*inr + 1.0f, vexp);
dv = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
v = CC[j + k * NWA];
dv += ascale * v * bb[k];
prods[0][k] += bscale[k] * v * aa[j];
}
atomicAdd(&A[i + ia[j]], dv); // Update A
}
#pragma unroll
for (k = 0; k < NWB; k++) {
atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B
}
}
__syncthreads();
}
}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNegFilt(int nrows, int ncols, int nwords, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float aa[NWA];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
float bscale[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol, tmpi;
float dv, v, ascale;
float inr = 1.0f / nrows;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
tmpi = WA[i + icol * NWA]; // Fill the A word matrix
if (tmpi < nwords) {
tmpi = nrows * tmpi;
} else {
tmpi = -1;
}
ia[i] = tmpi;
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
tmpi = WB[i + icol * NWB]; // Fill the B word matrix
if (tmpi < nwords) {
tmpi = nrows * tmpi;
} else {
tmpi = -1;
}
ib[i] = tmpi;
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
if (ib[j] >= 0) {
bb[j] = B[i + ib[j]];
} else {
bb[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
if (ia[j] >= 0) {
v = A[i + ia[j]];
} else {
v = 0;
}
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
if (ia[j] >= 0) {
aa[j] = A[i + ia[j]];
} else {
aa[j] = 0;
}
}
#pragma unroll
for (k = 0; k < NWB; k++) { // Load B data
if (ib[k] >= 0) {
bb[k] = B[i + ib[k]];
} else {
bb[k] = 0;
}
bscale[k] = pow(max(0, ib[k])*inr + 1.0f, vexp);
prods[0][k] = 0;
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the products
ascale = pow(max(0, ia[j])*inr + 1.0f, vexp);
dv = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
v = CC[j + k * NWA];
dv += ascale * v * bb[k];
prods[0][k] += bscale[k] * v * aa[j];
}
if (ia[j] >= 0) {
atomicAdd(&A[i + ia[j]], dv); // Update A
}
}
#pragma unroll
for (k = 0; k < NWB; k++) {
if (ib[k] >= 0) {
atomicAdd(&B[i + ib[k]], prods[0][k]); // Update B
}
}
}
__syncthreads();
}
}
/*
 * Evaluation kernel for the negatively-sampled word pairs (label 0): accumulates their log-likelihood into Retval.
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float bb[NWB];
float prods[NWA][NWB];
int ia[NWA];
int ib[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
int i, j, k, icol;
float v;
double sum = 0;
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + ib[j]];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the products of these elements
v = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += v * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[i + NWA * (j + NWB * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else if (v < -16.0f) {
v = 0.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = log(max(1.0f - v, 1.0e-20f)); // All these pairs have label 0
}
for (i = 1; i < NWA*NWB; i = i + i) {
if ((tid & (i-1)) == 0 && tid + i < NWA*NWB) {
CC[tid] += CC[tid + i];
}
__syncthreads();
}
sum += CC[0];
__syncthreads();
}
if (tid == 0) {
atomicAdd(&Retval[0], (float)sum);
}
}
/*
 * Convolutional kernel for word2vec. This handles the positively-labeled word pairs with
* one context word and the current word.
*/
template<int SKIP, int YDIM, int NREPS>
__global__ void __word2vecPos_exp(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate) {
const int nwindow = 2*SKIP+1;
float aa[NREPS];
float da[NREPS];
__shared__ float CC[YDIM * nwindow];
int i, j, k, tid, icol, dxy, lb, ub, iword, cword;
float bb, db, prod, v;
tid = threadIdx.x + blockDim.x * threadIdx.y;
dxy = blockDim.x * blockDim.y;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
iword = nrows * W[icol]; // Get the current word
__syncthreads();
lb = LB[icol];
ub = UB[icol];
if (iword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get A
aa[j] = A[tid + j * dxy + iword];
} else {
aa[j] = 0;
}
}
for (i = lb; i <= ub; i++) { // Iterate across the window for A cols
__syncthreads();
cword = nrows * W[icol + i]; // Get the current word
prod = 0;
if (cword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get B col
bb = B[tid + j * dxy + cword];
prod += aa[j] * bb; // Compute the product between current A, B cols
}
}
#pragma unroll
for (k = 1; k < 32; k = k + k) {
prod += __shfl_down(prod, k); // Reduce within warp
}
}
if (threadIdx.x == 0) {
CC[i - lb + threadIdx.y * nwindow] = prod; // Save to SHMEM
}
}
__syncthreads();
for (j = 1; j < blockDim.y; j++) { // Reduce across warps
for (i = tid; i < ub - lb; i += dxy) {
CC[i] += CC[i + j * nwindow];
}
__syncthreads();
}
__syncthreads(); // Apply the sigmoid map
for (i = tid; i < ub - lb; i += dxy) {
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = lrate * (1.0f - v); // All pairs have label 1
}
__syncthreads();
#pragma unroll
for (j = 0; j < NREPS; j++) {
da[j] = 0;
}
for (i = lb; i <= ub; i++) { // Iterate across the window for A cols
cword = nrows * W[icol + i]; // Get the context word
v = CC[i - lb];
if (cword >= 0) {
#pragma unroll
for (j = 0; j < NREPS; j++) { // Iterate over blocks of elements
if (tid + j * dxy < nrows) { // Get B col
bb = B[tid + j * dxy + cword];
da[j] += v * bb;
db = v * aa[j];
atomicAdd(&B[tid + j * dxy + cword], db);
}
}
}
}
#pragma unroll
for (j = 0; j < NREPS; j++) {
if (tid + j * dxy < nrows) {
atomicAdd(&A[tid + j * dxy + iword], da[j]);
}
}
}
}
}
/*
* Combined forward-backward word2vec kernel
*/
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg_old(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BYDIM];
float dd[MAXD];
float prods[NWA][NWB];
float aa, v, sum;
int ia[NWA];
int ib[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int i, j, k, icol;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
ia[i] = nrows * WA[i + icol * NWA]; // Fill the A word matrix
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
ib[i] = nrows * WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
if (ib[j] >= 0) {
dd[j] = B[i + ib[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Compute the inner products of these elements
if (ia[j] >= 0) {
aa = A[i + ia[j]];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += aa * dd[k];
}
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWA*NWB; i+= dxy) { // Compute logistic function on all products
v = CC[i];
if (v > 16.0f) {
v = 1.0f;
} else {
v = exp(v);
v = v / (1.0f + v);
}
CC[i] = - lrate * v; // All these pairs have label 0
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWB; j++) { // Load B data
if (ib[j] >= 0) {
dd[j] = B[i + ib[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the product
if (ia[j] >= 0) {
sum = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
float xx = CC[j + k * NWA];
sum += xx * dd[k];
}
atomicAdd(&A[i + ia[j]], sum);
}
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Load A data
if (ia[j] >= 0) {
dd[j] = A[i + ia[j]];
} else {
dd[j] = 0;
}
}
#pragma unroll
for (j = 0; j < NWB; j++) { // Now do the product
if (ib[j] >= 0) {
sum = 0;
#pragma unroll
for (k = 0; k < NWA; k++) {
float xx = CC[k + j * NWA];
sum += xx * dd[k];
}
atomicAdd(&B[i + ib[j]], sum);
}
}
}
__syncthreads();
}
}
/*
*
* Simple forward kernel for word2vec. Computes inner products of columns from A with columns from B.
* The column indices are specified by two "word" matrices. The inner products are computed as an outer product
* of the word matrices.
*
* NWA is the number of words per column in WA
* NWB is the number of words per column in WB
*
* Columns of the output matrix C are <window> = NWA*NWB long, and contain inner products with corresponding columns of B.
*
*/
template<int NWA, int NWB, int BDIM>
__global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {
const int NWAB = NWA*NWB;
__shared__ float CC[NWA*NWB*BDIM];
float aa;
float bb[NWB];
float prods[NWA][NWB];
int wa[NWA];
int wb[NWB];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int i, j, k, icol;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // Iterate over columns
#pragma unroll
for (i = 0; i < NWA; i++) {
      wa[i] = WA[i + icol * NWA]; // Fill the A word matrix (used below as A[i + wa[j] * nrows], matching wb)
#pragma unroll
for (j = 0; j < NWB; j++) { // clear the products matrix
prods[i][j] = 0;
}
}
#pragma unroll
for (i = 0; i < NWB; i++) {
wb[i] = WB[i + icol * NWB]; // Fill the B word matrix
}
for (i = tid; i < nrows; i += dxy) { // Now iterate over the rows of this block
#pragma unroll
for (j = 0; j < NWB ; j++) { // Read B
bb[j] = B[i + wb[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Computes the products of these elements
aa = A[i + wa[j] * nrows];
#pragma unroll
for (k = 0; k < NWB; k++) {
prods[j][k] += aa * bb[k];
}
}
} // Finished the entire block
#pragma unroll
for (i = 0; i < NWA; i++) { // Reduce the products within each warp
#pragma unroll
for (j = 0; j < NWB; j++) {
#pragma unroll
for (k = 1; k < 32; k = k+k) {
float tmp = __shfl_down(prods[i][j], k);
prods[i][j] += tmp;
}
}
}
__syncthreads();
if (threadIdx.x == 0) { // Save the products to SHMEM (one copy per warp)
#pragma unroll
for (i = 0; i < NWA; i++) {
#pragma unroll
for (j = 0; j < NWB; j++) {
CC[j + NWB * (i + NWA * threadIdx.y)] = prods[i][j];
}
}
}
__syncthreads();
for (i = 1; i < blockDim.y; i++) {
__syncthreads();
#pragma unroll
for (j = tid; j < NWAB; j += dxy) { // Reduce the products across warps
CC[j] += CC[j + i * NWAB];
}
}
__syncthreads();
for (i = tid; i < NWAB; i += dxy) { // Save to main memory
C[i + icol * NWAB] = CC[i];
//atomicAdd(&C[i + icol * NWAB], CC[i]);
}
__syncthreads();
}
}
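/*
 * Illustrative sketch (not part of the original source): a host-side reference for the forward
 * pass above, assuming column-major A and B of height nrows. For each column icol, C(:,icol)
 * holds the NWA*NWB inner products <A(:,WA[i]), B(:,WB[j])>, stored with the B index fastest,
 * matching the CC[j + NWB * i] layout used by the kernel.
 *
 *   for (int icol = 0; icol < ncols; icol++) {
 *     for (int i = 0; i < NWA; i++) {
 *       for (int j = 0; j < NWB; j++) {
 *         int ca = WA[i + icol * NWA], cb = WB[j + icol * NWB];
 *         float sum = 0;
 *         for (int r = 0; r < nrows; r++) sum += A[r + ca * nrows] * B[r + cb * nrows];
 *         C[j + NWB * i + icol * NWA * NWB] = sum;
 *       }
 *     }
 *   }
 */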
/*
*
* Simple backward kernel for word2vec.
* Computes the gradient for A given B or vice-versa, and does an SGD update.
*
* NWA is the number of words per column in WA
* NWB is the number of words per column in WB
*
*/
template<int NWA, int NWB, int MAXDIM>
__global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {
const int NWAB = NWA * NWB;
float dd[MAXDIM];
int wa[NWA];
int wb[NWB];
__shared__ float cc[NWA*NWB];
int tid = threadIdx.x;
int fid = threadIdx.x + blockDim.x * threadIdx.y;
int dxy = blockDim.x * blockDim.y;
int icol, i, j, k;
float sum;
int istart = (int)((1L * blockIdx.x * ncols) / gridDim.x);
int iend = (int)((1L * (blockIdx.x+1) * ncols) / gridDim.x);
for (icol = istart; icol < iend; icol++) { // iterate in columns
#pragma unroll
for (j = 0; j < NWA; j++) {
wa[j] = WA[j + icol * NWA]; // Load the A word matrix
}
__syncthreads();
#pragma unroll
for (j = 0; j < NWB; j++) {
wb[j] = WB[j + icol * NWB]; // Load the B word matrix
}
for (i = fid; i < NWAB; i += dxy) {
cc[i] = C[i + icol * NWAB];
}
__syncthreads();
for (i = tid; i < nrows; i += dxy) {
#pragma unroll
for (j = 0; j < NWB; j++) { // Load the data
dd[j] = B[i + wb[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Now do the product
sum = 0;
#pragma unroll
for (k = 0; k < NWB; k++) {
float xx = cc[j + k * NWA];
sum += xx * dd[k];
}
atomicAdd(&A[i + wa[j] * nrows], sum * lrate);
}
#pragma unroll
for (j = 0; j < NWA; j++) { // Load the data
dd[j] = A[i + wa[j] * nrows];
}
#pragma unroll
for (j = 0; j < NWB; j++) { // Now do the product
sum = 0;
#pragma unroll
for (k = 0; k < NWA; k++) {
float xx = cc[k + j * NWA];
sum += xx * dd[k];
}
atomicAdd(&B[i + wb[j] * nrows], sum * lrate);
}
}
}
}
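/*
 * Illustrative note (not in the original source): with P = A_w^T B_w the NWA x NWB product matrix
 * from the forward kernel and dP = C the incoming gradient, the updates applied above are the
 * usual matrix-product gradients, scaled by lrate:
 *
 *   dA_w = B_w * dP^T   (column wa[i] of A receives  sum_j dP(i,j) * B(:,wb[j]))
 *   dB_w = A_w * dP     (column wb[j] of B receives  sum_i dP(i,j) * A(:,wa[i]))
 *
 * A minimal host-side sketch of the A-side update for one output column, assuming column-major
 * storage and the cc[i + j * NWA] indexing used by the kernel:
 *
 *   for (int r = 0; r < nrows; r++)
 *     for (int i = 0; i < NWA; i++) {
 *       float sum = 0;
 *       for (int j = 0; j < NWB; j++) sum += cc[i + j * NWA] * B[r + wb[j] * nrows];
 *       A[r + wa[i] * nrows] += lrate * sum;
 *     }
 */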
#else
template<int SKIP, int BYDIM, int NREPS>
__global__ void __word2vecPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecNegFilt(int nrows, int ncols, int nwords, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {}
template<int SKIP, int BYDIM, int NREPS>
__global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {}
template<int NWA, int NWB, int MAXD, int BYDIM>
__global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {}
template<int NWA, int NWB, int BDIM>
__global__ void __word2vecFwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C) {}
template<int NWA, int NWB, int MAXDIM>
__global__ void __word2vecBwd(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *C, float lrate) {}
#endif
int word2vecPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, CDIM, 1);
int nblocks = min(64, ncols);
switch(skip) {
case 5 : __word2vecPos<5, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
case 3 : __word2vecPos<3, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
case 2 : __word2vecPos<2, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, lrate, vexp); break;
default : printf("word2vecPos unsupport size %d\n", skip); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
int word2vecNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001: __word2vecNeg<5,1,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 50005: __word2vecNeg<5,5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 100005: __word2vecNeg<10,5,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break;
case 50010: __word2vecNeg<5,10,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate, vexp); break;
// case 150010: __word2vecNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, lrate); break;
default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
int word2vecNegFilt(int nrows, int ncols, int nwords, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001: __word2vecNegFilt<5,1,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
case 50005: __word2vecNegFilt<5,5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
case 100005: __word2vecNegFilt<10,5,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
case 50010: __word2vecNegFilt<5,10,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
// case 150010: __word2vecNegFilt<15,10,15><<<nblocks,threads>>>(nrows, ncols, nwords, WA, WB, A, B, lrate, vexp); break;
default : printf("word2vec unsupport size combination %d %d\n", nwa, nwb); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
int word2vecEvalPos(int nrows, int ncols, int skip, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {
dim3 threads(32, CDIM, 1);
int nblocks = min(64, ncols);
switch(skip) {
case 5 : __word2vecEvalPos<5, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break;
case 3 : __word2vecEvalPos<3, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break;
case 2 : __word2vecEvalPos<2, CDIM, 10/CDIM><<<nblocks,threads>>>(nrows, ncols, W, LB, UB, A, B, Retval); break;
default : printf("word2vecEvalPos unsupport size %d\n", skip); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
int word2vecEvalNeg(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *Retval) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001: __word2vecEvalNeg<5,1,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
case 50005: __word2vecEvalNeg<5,5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
case 100005: __word2vecEvalNeg<10,5,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
case 50010: __word2vecEvalNeg<5,10,10,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
// case 150010: __word2vecEvalNeg<15,10,15><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, Retval); break;
default : printf("word2vecEvalNeg unsupport size combination %d %d\n", nwa, nwb); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
int word2vecFwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C) {
dim3 threads(32, BYDIMF, 1);
int nblocks = min(4096, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001: __word2vecFwd<5,1,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break;
case 50005: __word2vecFwd<5,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break;
case 100005: __word2vecFwd<10,5,BYDIMF><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C); break;
default : printf("word2vecFwd unsupport size combination %d %d\n", nwa, nwb); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
int word2vecBwd(int nrows, int ncols, int nwa, int nwb, int *WA, int *WB, float *A, float *B, float *C, float lrate) {
dim3 threads(32*BYDIMB, 1, 1);
int nblocks = min(2048, 2 + (ncols - 1));
int which = nwa*10000 + nwb;
switch (which) {
case 50001: __word2vecBwd<5,1,5><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break;
case 50005: __word2vecBwd<5,5,5><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break;
case 100005: __word2vecBwd<10,5,10><<<nblocks,threads>>>(nrows, ncols, WA, WB, A, B, C, lrate); break;
default : printf("word2vecBwd unsupport size combination %d %d\n", nwa, nwb); return 1;
}
cudaDeviceSynchronize();
int err = cudaGetLastError();
return err;
}
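/*
 * Illustrative usage sketch (not part of the original source). The wrappers above dispatch on the
 * packed key nwa*10000 + nwb, so only the (nwa, nwb) pairs listed in each switch are supported.
 * A minimal, hypothetical call site with 5 words per side might look like:
 *
 *   // WA, WB: device int arrays of shape (nwa x ncols) and (nwb x ncols) holding word indices
 *   // A, B:   device float matrices of height nrows (model and context embeddings)
 *   int err = word2vecNeg(nrows, ncols, 5, 5, WA, WB, A, B, 0.05f, 0.5f);
 *   if (err != 0) printf("unsupported (nwa,nwb) pair or CUDA error %d\n", err);
 *
 * Pointer names and hyper-parameter values here are placeholders, not values taken from the code.
 */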
|
309dc1a6af4b7676932601fa5850f5fca7939883.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <regionLayer.h>
#include <cfloat>
/** \brief kernel for softmax
 * - n is the number of classes (including the background)
*
* - The CPU implementation is
* for b in batch:
* for g in groups:
* softmax(input + b*batchOffset + g*groupOffset, n, temp, stride, output + b*batchOffset + g*groupOffset)
*
 * - The GPU implementation runs the two for-loops in parallel.
*
* - nthdsPerCTA: the max number of threads per block.
 * - Each thread is in charge of the softmax over all classes for one point.
* - Total number of threads: batch * groups
*
* - TODO: using warp shuffle instead of loop in one thread.
*/
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void softmaxKernel(const float * input,
const int n,
const int batch,
const int batchOffset,
const int groups,
const int groupOffset,
const int stride,
const float temp,
float * output)
{
int id = blockIdx.x * nthdsPerCTA + threadIdx.x;
// per batch, per group
if (id < batch * groups)
{
int b = id / groups;
int g = id % groups;
float sum = 0.;
float largest = -FLT_MAX;
int offset = b*batchOffset + g*groupOffset;
for (int i = 0; i < n; ++i)
{
float val = input[i*stride + offset];
largest = (val > largest) ? val : largest;
}
for (int i = 0; i < n; ++i)
{
            float e = exp(input[i*stride + offset]/temp - largest/temp); // bounds the score to (-inf,0], so each denominator factor is in (0,1].
sum += e;
output[i*stride + offset] = e;
}
for (int i = 0; i < n; ++i)
output[i*stride + offset] /= sum;
}
}
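/*
 * Illustrative sketch (not in the original source): the per-thread computation above is the
 * standard max-shifted softmax. A host-side reference for one (batch, group) slot, using the same
 * strided layout, would be:
 *
 *   void softmax_ref(const float *x, int n, int stride, float temp, float *y) {
 *       float m = -FLT_MAX;
 *       for (int i = 0; i < n; ++i) m = fmaxf(m, x[i * stride]);
 *       float s = 0.f;
 *       for (int i = 0; i < n; ++i) { y[i * stride] = expf((x[i * stride] - m) / temp); s += y[i * stride]; }
 *       for (int i = 0; i < n; ++i) y[i * stride] /= s;
 *   }
 *
 * (x and y here point at input/output + b*batchOffset + g*groupOffset, as in the doc comment.)
 */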
/**
* \brief Sigmoid function
*
* "__launch_bounds__" ensures the universality of kernel
*/
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void activateKernel(float * data,
const int range)
{
int i = blockIdx.x * nthdsPerCTA + threadIdx.x;
if (i < range)
data[i] = 1. / (1. + exp(-data[i]));
}
/**
* \brief region layer of YOLOv3
* Includes activation and softmax.
* - num: # bounding box per location
*
 * When integrated into TensorRT, input and output are different buffers (hence the device-to-device copy below).
 * In standalone GPU code (in main.cpp), input and output can be the same buffer.
*
* Note: The elements in YOLOv3
* * 4*nCells coords,
* * nCells conf,
* * classes*nCells classes
* e.g.
* * nCells for 0 class (background)
* * nCells for 1 class
* * ...
*/
void regionLayer_gpu(
const int batch,
const int C,
const int nCells,
const int num,
const int coords,
const int classes,
const float * input,
float * output,
hipStream_t stream)
{
const int blockSize = 256;
const int gridSize_Act1 = (2*nCells + blockSize - 1) / blockSize; // x, y
const int gridSize_Act2 = (nCells + blockSize - 1) / blockSize; // conf
const int gridSize_Softmax = (nCells + blockSize - 1) / blockSize; // classes
    // for YOLOv3, the output of the final layer is C*nCells, where C covers all the conf, coord, and class channels.
#ifdef REGION_IN_TRT
// TRT, input and output are diff buffer
ck(hipMemcpy((void*)output, (void*)input, batch*C*nCells*sizeof(float), hipMemcpyDeviceToDevice));
#endif
// else input and output can be same buffer
for (int b = 0; b < batch; ++b) {
for (int n = 0; n < num; ++n) {
// activate on (x,y)
int index = b*C*nCells // per batch
+ n*nCells*(coords+classes+1); // coords, classes and confidence
hipLaunchKernelGGL(( activateKernel<blockSize>)
, dim3(gridSize_Act1), dim3(blockSize), 0, stream,
output + index, 2*nCells);
            // activate (sigmoid) the objectness confidence
index = b*C*nCells
+ n*nCells*(coords+classes+1)
+ 4*nCells; // skip coords
hipLaunchKernelGGL(( activateKernel<blockSize>)
, dim3(gridSize_Act2), dim3(blockSize), 0, stream,
output + index, nCells);
// softmax for all classes
index = b*C*nCells
+ n*nCells*(coords+classes+1)
+ 5*nCells; // skip conf
hipLaunchKernelGGL(( softmaxKernel<blockSize>)
, dim3(gridSize_Softmax), dim3(blockSize), 0, stream,
input + index, // input: skip loc, conf
classes, // n: #classes
batch*num, // batch: batch * #bound_box
(C*nCells/num), // batchOffset: number of bounding_box in total
nCells, // groups
1, // groupOffset
nCells, // stride
1.f, // temp
output + index); // output
}
}
}
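/*
 * Illustrative layout note (not in the original source): inside one batch item, each of the `num`
 * boxes owns a contiguous block of (coords + 1 + classes) * nCells floats, ordered
 * x, y, w, h, conf, class_0 ... class_{classes-1}, each as a plane of nCells values.
 * With coords = 4, classes = 80 and num = 3 (typical YOLOv3 values, assumed here):
 *
 *   box n of batch b starts at  b*C*nCells + n*85*nCells
 *   its (x, y) planes span      2*nCells floats   -> sigmoid (gridSize_Act1)
 *   its conf plane starts at    +4*nCells         -> sigmoid (gridSize_Act2)
 *   its class planes start at   +5*nCells         -> softmax over the 80 class channels
 */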
#define nOutputLayer 3
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void reorgOutputKernel(
const int nBatch,
const int nClasses,
const int nBboxesPerLoc,
const int coords,
const int l0_w,
const int l0_h,
const int nCells,
float* dpData_unordered[],
float* dpData)
{
long i = blockIdx.x * nthdsPerCTA + threadIdx.x;
const int bboxMemLen = (nClasses + coords + 1) * nCells;
const int batchMemLen = nBboxesPerLoc * bboxMemLen;
const long range = nBatch * batchMemLen;
if (i < range) // voc<266175 coco<904995 wrt. 416*416 input
{
int b = i / batchMemLen;
int bboxIdx = (i % batchMemLen) / bboxMemLen;
int channelIdx = ((i % batchMemLen) % bboxMemLen) / nCells;
int locIdx = (i % batchMemLen) % nCells;
int locLayer, cnt_offset = 1+2*2+4*4;
for(int j = nOutputLayer-1; j >= 0; --j){
cnt_offset -= (1<<j)*(1<<j); // zoomFactor = 2
if(locIdx >= cnt_offset*l0_w*l0_h){
locLayer = j;
break;
}
}
dpData[i] = dpData_unordered[locLayer]\
[b*nBboxesPerLoc*(nClasses+coords+1)*(1<<locLayer)*(1<<locLayer)*l0_w*l0_h +\
bboxIdx*(nClasses+coords+1)*(1<<locLayer)*(1<<locLayer)*l0_w*l0_h +\
channelIdx*(1<<locLayer)*(1<<locLayer)*l0_w*l0_h +\
locIdx - cnt_offset*l0_w*l0_h];
}
}
void reorgOutput_gpu(
const int nBatch,
const int nClasses,
const int nBboxesPerLoc,
const int coords,
const int l0_w,
const int l0_h,
const int nCells,
float* dpData_unordered[],
float* dpData,
const long nData,
hipStream_t stream)
{
const int blockSize = 512;
const int gridSize = (nData + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( reorgOutputKernel<blockSize>)
, dim3(gridSize), dim3(blockSize), 0, stream,
nBatch, nClasses, nBboxesPerLoc, coords, l0_w, l0_h, nCells, dpData_unordered, dpData);
}
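/*
 * Illustrative usage sketch (not in the original source): dpData_unordered must itself live in
 * device memory, since reorgOutputKernel dereferences it on the device. A minimal, hypothetical
 * setup for the three YOLO output layers could look like:
 *
 *   float *hostPtrs[nOutputLayer] = { dLayer0, dLayer1, dLayer2 };   // device buffers, placeholder names
 *   float **dPtrs;
 *   hipMalloc(&dPtrs, nOutputLayer * sizeof(float*));
 *   hipMemcpy(dPtrs, hostPtrs, nOutputLayer * sizeof(float*), hipMemcpyHostToDevice);
 *   reorgOutput_gpu(nBatch, nClasses, nBboxesPerLoc, 4, l0_w, l0_h, nCells, dPtrs, dpData, nData, stream);
 *
 * Buffer names here are placeholders; only the pointer-to-device-pointers pattern is the point.
 */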
|
309dc1a6af4b7676932601fa5850f5fca7939883.cu
|
#include <regionLayer.h>
#include <cfloat>
/** \brief kernel for softmax
 * - n is the number of classes (including the background)
*
* - The CPU implementation is
* for b in batch:
* for g in groups:
* softmax(input + b*batchOffset + g*groupOffset, n, temp, stride, output + b*batchOffset + g*groupOffset)
*
 * - The GPU implementation runs the two for-loops in parallel.
*
* - nthdsPerCTA: the max number of threads per block.
 * - Each thread is in charge of the softmax over all classes for one point.
* - Total number of threads: batch * groups
*
* - TODO: using warp shuffle instead of loop in one thread.
*/
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void softmaxKernel(const float * input,
const int n,
const int batch,
const int batchOffset,
const int groups,
const int groupOffset,
const int stride,
const float temp,
float * output)
{
int id = blockIdx.x * nthdsPerCTA + threadIdx.x;
// per batch, per group
if (id < batch * groups)
{
int b = id / groups;
int g = id % groups;
float sum = 0.;
float largest = -FLT_MAX;
int offset = b*batchOffset + g*groupOffset;
for (int i = 0; i < n; ++i)
{
float val = input[i*stride + offset];
largest = (val > largest) ? val : largest;
}
for (int i = 0; i < n; ++i)
{
            float e = exp(input[i*stride + offset]/temp - largest/temp); // bounds the score to (-inf,0], so each denominator factor is in (0,1].
sum += e;
output[i*stride + offset] = e;
}
for (int i = 0; i < n; ++i)
output[i*stride + offset] /= sum;
}
}
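/*
 * Illustrative sketch (not in the original source) of the warp-shuffle idea mentioned in the TODO
 * above: if one warp handled one group, the max/sum loops could be reduced across lanes with
 * __shfl_xor_sync instead of a serial per-thread loop, e.g.
 *
 *   float v = ...;                                              // this lane's partial max (or sum)
 *   for (int mask = 16; mask > 0; mask >>= 1)
 *       v = fmaxf(v, __shfl_xor_sync(0xffffffff, v, mask));     // use '+' for the sum pass
 *
 * This only sketches the reduction primitive; mapping lanes to class indices and handling n > 32
 * would still need to be worked out.
 */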
/**
* \brief Sigmoid function
*
* "__launch_bounds__" ensures the universality of kernel
*/
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void activateKernel(float * data,
const int range)
{
int i = blockIdx.x * nthdsPerCTA + threadIdx.x;
if (i < range)
data[i] = 1. / (1. + exp(-data[i]));
}
/**
* \brief region layer of YOLOv3
* Includes activation and softmax.
* - num: # bounding box per location
*
 * When integrated into TensorRT, input and output are different buffers (hence the device-to-device copy below).
 * In standalone GPU code (in main.cpp), input and output can be the same buffer.
*
* Note: The elements in YOLOv3
* * 4*nCells coords,
* * nCells conf,
* * classes*nCells classes
* e.g.
* * nCells for 0 class (background)
* * nCells for 1 class
* * ...
*/
void regionLayer_gpu(
const int batch,
const int C,
const int nCells,
const int num,
const int coords,
const int classes,
const float * input,
float * output,
cudaStream_t stream)
{
const int blockSize = 256;
const int gridSize_Act1 = (2*nCells + blockSize - 1) / blockSize; // x, y
const int gridSize_Act2 = (nCells + blockSize - 1) / blockSize; // conf
const int gridSize_Softmax = (nCells + blockSize - 1) / blockSize; // classes
    // for YOLOv3, the output of the final layer is C*nCells, where C covers all the conf, coord, and class channels.
#ifdef REGION_IN_TRT
// TRT, input and output are diff buffer
ck(cudaMemcpy((void*)output, (void*)input, batch*C*nCells*sizeof(float), cudaMemcpyDeviceToDevice));
#endif
// else input and output can be same buffer
for (int b = 0; b < batch; ++b) {
for (int n = 0; n < num; ++n) {
// activate on (x,y)
int index = b*C*nCells // per batch
+ n*nCells*(coords+classes+1); // coords, classes and confidence
activateKernel<blockSize>
<<<gridSize_Act1, blockSize, 0, stream>>>
(output + index, 2*nCells);
            // activate (sigmoid) the objectness confidence
index = b*C*nCells
+ n*nCells*(coords+classes+1)
+ 4*nCells; // skip coords
activateKernel<blockSize>
<<<gridSize_Act2, blockSize, 0, stream>>>
(output + index, nCells);
// softmax for all classes
index = b*C*nCells
+ n*nCells*(coords+classes+1)
+ 5*nCells; // skip conf
softmaxKernel<blockSize>
<<<gridSize_Softmax, blockSize, 0, stream>>>
(input + index, // input: skip loc, conf
classes, // n: #classes
batch*num, // batch: batch * #bound_box
(C*nCells/num), // batchOffset: number of bounding_box in total
nCells, // groups
1, // groupOffset
nCells, // stride
1.f, // temp
output + index); // output
}
}
}
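/*
 * Illustrative call sketch (not in the original source), assuming the final layer output has
 * already been copied to the device buffer dIn and dOut has room for batch*C*nCells floats:
 *
 *   // hypothetical sizes for a 13x13 YOLOv3 head: nCells = 169, num = 3, coords = 4, classes = 80, C = 255
 *   regionLayer_gpu(batch, 255, 169, 3, 4, 80, dIn, dOut, stream);
 *   cudaStreamSynchronize(stream);
 *
 * Without REGION_IN_TRT defined, dIn and dOut should be the same buffer, as noted in the comments above.
 */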
#define nOutputLayer 3
template <unsigned nthdsPerCTA>
__launch_bounds__(nthdsPerCTA)
__global__ void reorgOutputKernel(
const int nBatch,
const int nClasses,
const int nBboxesPerLoc,
const int coords,
const int l0_w,
const int l0_h,
const int nCells,
float* dpData_unordered[],
float* dpData)
{
long i = blockIdx.x * nthdsPerCTA + threadIdx.x;
const int bboxMemLen = (nClasses + coords + 1) * nCells;
const int batchMemLen = nBboxesPerLoc * bboxMemLen;
const long range = nBatch * batchMemLen;
if (i < range) // voc<266175 coco<904995 wrt. 416*416 input
{
int b = i / batchMemLen;
int bboxIdx = (i % batchMemLen) / bboxMemLen;
int channelIdx = ((i % batchMemLen) % bboxMemLen) / nCells;
int locIdx = (i % batchMemLen) % nCells;
int locLayer, cnt_offset = 1+2*2+4*4;
for(int j = nOutputLayer-1; j >= 0; --j){
cnt_offset -= (1<<j)*(1<<j); // zoomFactor = 2
if(locIdx >= cnt_offset*l0_w*l0_h){
locLayer = j;
break;
}
}
dpData[i] = dpData_unordered[locLayer]\
[b*nBboxesPerLoc*(nClasses+coords+1)*(1<<locLayer)*(1<<locLayer)*l0_w*l0_h +\
bboxIdx*(nClasses+coords+1)*(1<<locLayer)*(1<<locLayer)*l0_w*l0_h +\
channelIdx*(1<<locLayer)*(1<<locLayer)*l0_w*l0_h +\
locIdx - cnt_offset*l0_w*l0_h];
}
}
void reorgOutput_gpu(
const int nBatch,
const int nClasses,
const int nBboxesPerLoc,
const int coords,
const int l0_w,
const int l0_h,
const int nCells,
float* dpData_unordered[],
float* dpData,
const long nData,
cudaStream_t stream)
{
const int blockSize = 512;
const int gridSize = (nData + blockSize - 1) / blockSize;
reorgOutputKernel<blockSize>
<<<gridSize, blockSize, 0, stream>>>
(nBatch, nClasses, nBboxesPerLoc, coords, l0_w, l0_h, nCells, dpData_unordered, dpData);
}
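/*
 * Illustrative note (not in the original source) on the cnt_offset arithmetic in reorgOutputKernel:
 * with nOutputLayer = 3 and a zoom factor of 2, each l0 cell corresponds to 1 + 4 + 16 = 21 cells
 * across the three scales, which is the 1+2*2+4*4 initializer. Walking j from 2 down to 0:
 *
 *   j = 2: cnt_offset = 21 - 16 = 5  -> locIdx in [5*l0_w*l0_h, 21*l0_w*l0_h) belongs to layer 2
 *   j = 1: cnt_offset =  5 -  4 = 1  -> locIdx in [1*l0_w*l0_h,  5*l0_w*l0_h) belongs to layer 1
 *   j = 0: cnt_offset =  1 -  1 = 0  -> locIdx in [0,            1*l0_w*l0_h) belongs to layer 0
 *
 * so nCells is assumed to be 21 * l0_w * l0_h.
 */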
|
6ef8716b440d11070080a79a89fe4bd5996a4350.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "WTUpdateKernel.cuh"
void UpdateWTKernel(WTAll &argWT, Document &argDoc, int argChunkId) {
unsigned int* deviceCounter;
hipMalloc(&deviceCounter, sizeof(unsigned int));
hipMemset(deviceCounter, 0, sizeof(unsigned int));
int numOfTokenD = argDoc.numOfTokenVecD[argChunkId];
int numOfWordS = argWT.blockCount + argWT.warpCount;
WT_Update_Kernel << <GridDim, BlockDim >> > (argDoc.deviceTLTopic, argWT.deviceChunkNZWTCount, argWT.deviceChunkWTIndex, argWT.deviceChunkWTValue, argWT.deviceChunkWTCount, argWT.deviceChunkWTOffset, argWT.deviceWTRowSum, deviceCounter, numOfWordS, argDoc.d_dense, numOfTokenD);
H_ERR(hipDeviceSynchronize());
hipFree(deviceCounter); // release the temporary device counter
}
//
//
//void UpdateWTKernel(WTAll &argWT, Document &argDoc, int argChunkId) {
//
// int iterBlock = (argWT.blockCount - 1) / GridDim + 1;// number of iterations for block.
// //int iterBlock = 9;// number of iterations for block.
// int GridWarpDim = GridDim*BlockDim / 32;
// int iterAll = (argWT.blockCount - 1) / GridDim + 1 + (argWT.warpCount - 1) / GridWarpDim + 1; // number of total iterations.
//
// int blockCounter = 0;
// int warpCounter = 0;
// int numOfTokenD = argDoc.numOfTokenVecD[argChunkId];
// for (int i = 0; i < iterAll; i++)
// {
// if (i < iterBlock)
// {
// H_ERR(hipMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), hipMemcpyHostToDevice));
// tokenlist_to_matrix << <GridDim, BlockDim >> > (argDoc.deviceTLTopic, argWT.deviceChunkNZWTCount, argWT.deviceChunkWTIndex, argWT.deviceChunkWTValue, argWT.deviceChunkWTCount, argWT.deviceChunkWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argWT.deviceBlockCount, argDoc.d_dense, numOfTokenD);
// H_ERR(hipDeviceSynchronize());
// blockCounter++;
//
//
// }
// else
// {
// hipMemcpy(argDoc.d_warpCounter, &warpCounter, sizeof(int), hipMemcpyHostToDevice);
// tokenlist_to_matrix_warp << <GridDim, BlockDim >> > (argDoc.deviceTLTopic, argWT.deviceChunkNZWTCount, argWT.deviceChunkWTIndex, argWT.deviceChunkWTValue, argWT.deviceChunkWTCount, argWT.deviceChunkWTOffset, argWT.deviceWTRowSum, argDoc.d_warpCounter, argWT.deviceBlockCount, argWT.deviceWarpCount, numOfTokenD);
// /*printf("abc %d", warpCounter);*/
// H_ERR(hipDeviceSynchronize());
// warpCounter++;
// }
// H_ERR(hipDeviceSynchronize());
//
//
// }
//
//}
//
|
6ef8716b440d11070080a79a89fe4bd5996a4350.cu
|
#include "WTUpdateKernel.cuh"
void UpdateWTKernel(WTAll &argWT, Document &argDoc, int argChunkId) {
unsigned int* deviceCounter;
cudaMalloc(&deviceCounter, sizeof(unsigned int));
cudaMemset(deviceCounter, 0, sizeof(unsigned int));
int numOfTokenD = argDoc.numOfTokenVecD[argChunkId];
int numOfWordS = argWT.blockCount + argWT.warpCount;
WT_Update_Kernel << <GridDim, BlockDim >> > (argDoc.deviceTLTopic, argWT.deviceChunkNZWTCount, argWT.deviceChunkWTIndex, argWT.deviceChunkWTValue, argWT.deviceChunkWTCount, argWT.deviceChunkWTOffset, argWT.deviceWTRowSum, deviceCounter, numOfWordS, argDoc.d_dense, numOfTokenD);
H_ERR(cudaDeviceSynchronize());
cudaFree(deviceCounter); // release the temporary device counter
}
//
//
//void UpdateWTKernel(WTAll &argWT, Document &argDoc, int argChunkId) {
//
// int iterBlock = (argWT.blockCount - 1) / GridDim + 1;// number of iterations for block.
// //int iterBlock = 9;// number of iterations for block.
// int GridWarpDim = GridDim*BlockDim / 32;
// int iterAll = (argWT.blockCount - 1) / GridDim + 1 + (argWT.warpCount - 1) / GridWarpDim + 1; // number of total iterations.
//
// int blockCounter = 0;
// int warpCounter = 0;
// int numOfTokenD = argDoc.numOfTokenVecD[argChunkId];
// for (int i = 0; i < iterAll; i++)
// {
// if (i < iterBlock)
// {
// H_ERR(cudaMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), cudaMemcpyHostToDevice));
// tokenlist_to_matrix << <GridDim, BlockDim >> > (argDoc.deviceTLTopic, argWT.deviceChunkNZWTCount, argWT.deviceChunkWTIndex, argWT.deviceChunkWTValue, argWT.deviceChunkWTCount, argWT.deviceChunkWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argWT.deviceBlockCount, argDoc.d_dense, numOfTokenD);
// H_ERR(cudaDeviceSynchronize());
// blockCounter++;
//
//
// }
// else
// {
// cudaMemcpy(argDoc.d_warpCounter, &warpCounter, sizeof(int), cudaMemcpyHostToDevice);
// tokenlist_to_matrix_warp << <GridDim, BlockDim >> > (argDoc.deviceTLTopic, argWT.deviceChunkNZWTCount, argWT.deviceChunkWTIndex, argWT.deviceChunkWTValue, argWT.deviceChunkWTCount, argWT.deviceChunkWTOffset, argWT.deviceWTRowSum, argDoc.d_warpCounter, argWT.deviceBlockCount, argWT.deviceWarpCount, numOfTokenD);
// /*printf("abc %d", warpCounter);*/
// H_ERR(cudaDeviceSynchronize());
// warpCounter++;
// }
// H_ERR(cudaDeviceSynchronize());
//
//
// }
//
//}
//
|
c49fe5452d0bd7e2ec250f63497bbe463f4752d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MSECriterion.cu"
#else
#include "THHApply.cuh"
void THNN_(MSECriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 3, input, target, output);
if (reduce) {
THCTensor_(resize1d)(state, output, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
accreal sum = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, (accreal) 0,
thrust::plus<accreal>(), mse_functor<real, accreal>());
if (sizeAverage)
sum /= size;
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
return;
}
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3<real, real, real>(
state,
input,
target,
output,
mse_updateOutput_functor<real>());
}
void THNN_(MSECriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 4, input, target, gradInput, gradOutput);
if (reduce) {
ptrdiff_t size = THCTensor_(nElement)(state, input);
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
accreal norm = sizeAverage ? (accreal)(2)/size : (accreal)(2);
norm *= ScalarConvert<real, accreal>::to(THCTensor_(get1d)(state, gradOutput, 0));
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<real, accreal>(norm));
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
return;
}
THCUNN_check_shape(state, input, gradOutput);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradOutput_data(THCTensor_(data)(state, gradOutput));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<real, accreal>(2));
thrust::transform(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
gradInput_data, gradInput_data+size, gradOutput_data, gradInput_data,
thrust::multiplies<real>());
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(free)(state, gradOutput);
}
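/*
 * Illustrative note (not in the original source): the factor of 2 above comes from
 * d/dx (x - t)^2 = 2 (x - t). In the reduce path the scalar gradOutput and the optional 1/N
 * (sizeAverage) are folded into `norm` before the transform; in the non-reduced path the
 * element-wise 2*(x - t) is multiplied by the per-element gradOutput in a second transform.
 */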
#endif
|
c49fe5452d0bd7e2ec250f63497bbe463f4752d6.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MSECriterion.cu"
#else
#include "THCApply.cuh"
void THNN_(MSECriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 3, input, target, output);
if (reduce) {
THCTensor_(resize1d)(state, output, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
accreal sum = thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, (accreal) 0,
thrust::plus<accreal>(), mse_functor<real, accreal>());
if (sizeAverage)
sum /= size;
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
return;
}
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3<real, real, real>(
state,
input,
target,
output,
mse_updateOutput_functor<real>());
}
void THNN_(MSECriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
bool reduce)
{
THCUNN_check_shape(state, input, target);
THCUNN_assertSameGPU(state, 4, input, target, gradInput, gradOutput);
if (reduce) {
ptrdiff_t size = THCTensor_(nElement)(state, input);
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
accreal norm = sizeAverage ? (accreal)(2)/size : (accreal)(2);
norm *= ScalarConvert<real, accreal>::to(THCTensor_(get1d)(state, gradOutput, 0));
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<real, accreal>(norm));
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
return;
}
THCUNN_check_shape(state, input, gradOutput);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradOutput_data(THCTensor_(data)(state, gradOutput));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
input_data, input_data+size, target_data, gradInput_data,
mse_updateGradInput_functor<real, accreal>(2));
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
gradInput_data, gradInput_data+size, gradOutput_data, gradInput_data,
thrust::multiplies<real>());
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(free)(state, gradOutput);
}
#endif
|
ec4f33aa7a97cd7676b3518b54d99a03740a8d5e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudabridge.h"
__global__ void keyFinderKernel(int points, int compression);
__global__ void keyFinderKernelWithDouble(int points, int compression);
void callKeyFinderKernel(int blocks, int threads, int points, bool useDouble, int compression)
{
if(useDouble) {
hipLaunchKernelGGL(( keyFinderKernelWithDouble) , dim3(blocks), dim3(threads) , 0, 0, points, compression);
} else {
hipLaunchKernelGGL(( keyFinderKernel) , dim3(blocks), dim3(threads), 0, 0, points, compression);
}
waitForKernel();
}
void waitForKernel()
{
// Check for kernel launch error
hipError_t err = hipGetLastError();
if(err != hipSuccess) {
throw cuda::CudaException(err);
}
// Wait for kernel to complete
err = hipDeviceSynchronize();
fflush(stdout);
if(err != hipSuccess) {
throw cuda::CudaException(err);
}
}
|
ec4f33aa7a97cd7676b3518b54d99a03740a8d5e.cu
|
#include "cudabridge.h"
__global__ void keyFinderKernel(int points, int compression);
__global__ void keyFinderKernelWithDouble(int points, int compression);
void callKeyFinderKernel(int blocks, int threads, int points, bool useDouble, int compression)
{
if(useDouble) {
keyFinderKernelWithDouble <<<blocks, threads >>>(points, compression);
} else {
keyFinderKernel <<<blocks, threads>>> (points, compression);
}
waitForKernel();
}
void waitForKernel()
{
// Check for kernel launch error
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess) {
throw cuda::CudaException(err);
}
// Wait for kernel to complete
err = cudaDeviceSynchronize();
fflush(stdout);
if(err != cudaSuccess) {
throw cuda::CudaException(err);
}
}
|
2864f9a774c8e9503bf5b782db0eadcceb52cc01.hip
|
// !!! This is a file automatically generated by hipify!!!
//**********************************************************************
// *
// University Of North Carolina Charlotte *
// *
//Program: Convolution *
//Description: This program is to do convolution calculation *
// - CUDA *
// - GEMM convolution , global memory *
// *
//File Name: naivecon.c , naiveconv_kernel.cl *
//File Version: 1.0 *
//Baseline: Homework_2 *
// *
//Course: ECGR 6090 Heterogeneous Computing *
// *
//Programmed by: Yu Liu *
//Under Supervision of: Dr. Hamed Tabkhi                              *
// *
//Input file: images/viptraffic0.ppm ... images/viptraffic119.ppm *
//Output file: none *
//**********************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define BLOCKSIZE 256
#define HEIGHT 160
#define WIDTH 120
#define FLTSIZE 7 //filter size
#define PADDING 0
#define STRIDE 2
#define CHANNEL 96
//**********************************************************************
// Function Name: convolution (Kernel) *
// Description: - Execute direct(naive) convolution *
// - CUDA_global memory *
// Input file: none *
// Output file: none *
// Return: none *
//**********************************************************************
__global__ void convolution(unsigned char *image_d, unsigned char *output_d, float* filter, int imageGemmRgbSize, int filterSize, int channel)
{
int i, j, col;
int r, g, b;
col = blockIdx.x * blockDim.x + threadIdx.x; //image width *3
if (col < imageGemmRgbSize*channel)
{
r = 0;
g = 0;
b = 0;
for (i = 0; i < channel; i++)
{
for (j = 0; j < filterSize * filterSize; j++)
{
r += filter[i*channel + j] * image_d[col * filterSize * filterSize * 3]; //R
g += filter[i*channel + j] * image_d[col * filterSize * filterSize * 3 + 1]; //G
b += filter[i*channel + j] * image_d[col * filterSize * filterSize * 3 + 2]; //B
}
output_d[col * i * 3] = r;
output_d[col * i * 3 + 1] = g;
output_d[col * i * 3 + 2] = b;
}
}
}
//**********************************************************************
// Function Name: decode_image *
// Description: - read image in ppm format, read the data of array    *
// named frame[] *
// Input file: image file : viptrafficX.ppm *
// Output file: none *
// Return: 0 if success *
//**********************************************************************
int decode_image(unsigned char frame[HEIGHT * WIDTH * 3], char filename[])
{
FILE *pFile;
pFile = fopen(filename, "r");
fseek(pFile, 15L, SEEK_SET);//In ppm file, the first 15 bytes are content of "p6,120 160, 255", image data is from 16th bytes
fread(frame, sizeof(unsigned char), HEIGHT * WIDTH * 3 + 15, pFile);
fclose(pFile);
return 0;
}
//**********************************************************************
// Function Name:randomInit *
// Description: - Generate random values for a float array            *
// *
// Input file: none *
// Output file: none *
// Return: kernel file size *
//**********************************************************************
int randomInit(float* data, int size, int range) // random from 0/255 to 255/255
{
int i;
srand(time(NULL));
for (i = 0; i < size; i++)
{
data[i] = rand() % range / (float)range;
}
//for (i = 0; i < size; i++) printf("%f;", data[i]); // for debugging
return 0;
}
//**********************************************************************
// Function Name: transpose_gemm *
// Description: - transpose image to GEMM *
//                RGB channel                                          *
// Input file: none *
// Output file: none *
// Return: 0 if success *
//**********************************************************************
int transpose_gemm_rgb(unsigned char* input, unsigned char* output)
{
int i, j, k, step;
int convline = 0;
for (i = 0; i < (HEIGHT - FLTSIZE + 1); i+=STRIDE) // Height iteration
{
for (j = 0; j < (WIDTH - FLTSIZE + 1) * 3; j += (3*STRIDE)) //Width iteration
{
for (k = 0; k < FLTSIZE*FLTSIZE ; k ++)
{
step = (i*STRIDE + k / FLTSIZE)*WIDTH * 3 + (j*STRIDE + k%FLTSIZE);
//output_2D[convline][k]=input[step]; //2D, 1 channel
output[convline] = input[step]; //R
output[convline + 1] = input[step + 1]; //G
output[convline + 2] = input[step + 2]; //B
convline += 3;
}
}
}
return 0;
}
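/*
 * Illustrative note (not in the original source): transpose_gemm_rgb is an im2col-style
 * rearrangement. For a toy single-channel case with WIDTH=4, HEIGHT=4, FLTSIZE=2, STRIDE=1
 * (values chosen only for illustration), each output row would hold one 2x2 patch flattened:
 *
 *   input (4x4)            output rows (9 x 4)
 *   a b c d                a b e f
 *   e f g h                b c f g
 *   i j k l                c d g h
 *   m n o p                e f i j   ... and so on, one row per sliding-window position,
 *                          so the convolution becomes a matrix product with the flattened filter.
 */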
//**********************************************************************
// Function Name:Main *
// Description: - Main function on host, configure the kernel parameter*
// and run kernel *
// Input file: none *
// Output file: none *
// Return: 0 if success *
//**********************************************************************
int main(void)
{
int filterSize = FLTSIZE;
int channel = CHANNEL;
int convWidth = (WIDTH - FLTSIZE + 2 * PADDING) / STRIDE + 1; //convolution width with padding
	int convHeight = (HEIGHT - FLTSIZE + 2 * PADDING) / STRIDE + 1; //convolution height with padding
int imageRgbSize = HEIGHT * WIDTH * 3; // value is 57600 when image 160*120
int imageGemmRgbSize = convWidth * convHeight * FLTSIZE * FLTSIZE * 3; //value is 645183 when image 160*120, filter =7, padding =0, stride =2
int outputSize = convHeight * FLTSIZE * FLTSIZE * CHANNEL * 3; //value is 1086624 when image 160*120, filter =7, padding =0, stride =2, channel =3
int imagecount = 0; //counter for 120 images
unsigned char *image_d, *output_d;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float kernelExecTime = 0;
float timer;
float* filter = (float*)malloc(FLTSIZE*FLTSIZE * CHANNEL * sizeof(float));
unsigned char* image = (unsigned char*)malloc(imageRgbSize * sizeof(unsigned char));
unsigned char* imageGemmRgb = (unsigned char*)malloc(imageGemmRgbSize * sizeof(unsigned char));
unsigned char* output = (unsigned char*)malloc(outputSize * sizeof(unsigned char));
randomInit(filter, FLTSIZE*FLTSIZE*CHANNEL, 255); //initialize filter
////for debugging
//int k;
//for (k = 0; k < FLTSIZE*FLTSIZE*CHANNEL; k++)
//{
// printf("filter[%d]: %f; ", k, filter[k]);
//}
	hipMalloc((void**)&image_d, imageGemmRgbSize * sizeof(unsigned char));
	hipMalloc((void**)&output_d, outputSize * sizeof(unsigned char));
	float* filter_d; // device copy of the filter; the kernel cannot dereference the host malloc'ed pointer
	hipMalloc((void**)&filter_d, FLTSIZE*FLTSIZE * CHANNEL * sizeof(float));
	hipMemcpy(filter_d, filter, FLTSIZE*FLTSIZE * CHANNEL * sizeof(float), hipMemcpyHostToDevice);
while (imagecount < 120)
{
char filename[50];//file length upto 50
sprintf(filename, "images/viptraffic%d.ppm", imagecount);//read viptrafficX.ppm
decode_image(image, filename); //get image data from file
transpose_gemm_rgb(image, imageGemmRgb);
imagecount++;
////for debugging
//int k;
//for (k = 65000; k < 65100; k++) //value is 645183 when image 160*120, filter =7, stride =2
//{
// printf("image[%d]: %d; ", k, imageGemmRgb[k]);
//}
//Copy from host to device
hipMemcpy(image_d, imageGemmRgb, imageGemmRgbSize, hipMemcpyHostToDevice);
		dim3 dimBlock(BLOCKSIZE); // 1D launch: the kernel only uses the x index, and BLOCKSIZE*BLOCKSIZE threads would exceed the per-block limit
		dim3 dimGrid((imageGemmRgbSize + BLOCKSIZE - 1) / BLOCKSIZE);
		hipEventRecord(start, 0);
		hipLaunchKernelGGL(( convolution) , dim3(dimGrid), dim3(dimBlock) , 0, 0, image_d, output_d, filter_d, imageGemmRgbSize, filterSize, channel);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//Copy from device to host
hipMemcpy(output, output_d, outputSize * sizeof(unsigned char), hipMemcpyDeviceToHost);
////for debugging
//int k;
//for (k = 10000; k < 10010; k++)
//{
// printf("output[%d]: %f; ", k, output[k]);
//}
hipEventElapsedTime(&timer, start, stop);
kernelExecTime += timer;
}
//Free memory allocation
	hipFree(output_d);
	hipFree(image_d);
	hipFree(filter_d);
free(output);
free(image);
free(imageGemmRgb);
free(filter);
printf("Cumputing done! Golbal memory applied in CUDA.\n");
printf("Image amount:%d; Image size:%d x %d; Padding:%d; Stride:%d; Filter Size:%d.\n", imagecount, WIDTH, HEIGHT, PADDING, STRIDE, FLTSIZE);
printf("Kernel Execution time: %f milli seconds\n", kernelExecTime);
//system("pause");
return EXIT_SUCCESS;
}
|
2864f9a774c8e9503bf5b782db0eadcceb52cc01.cu
|
//**********************************************************************
// *
// University Of North Carolina Charlotte *
// *
//Program: Convolution *
//Description: This program is to do convolution calculation *
// - CUDA *
// - GEMM convolution , global memory *
// *
//File Name: naivecon.c , naiveconv_kernel.cl *
//File Version: 1.0 *
//Baseline: Homework_2 *
// *
//Course: ECGR 6090 Heterogeneous Computing *
// *
//Programmed by: Yu Liu *
//Under Supervision of: Dr. Hamed Tabkhi                              *
// *
//Input file: images/viptraffic0.ppm ... images/viptraffic119.ppm *
//Output file: none *
//**********************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#define BLOCKSIZE 256
#define HEIGHT 160
#define WIDTH 120
#define FLTSIZE 7 //filter size
#define PADDING 0
#define STRIDE 2
#define CHANNEL 96
//**********************************************************************
// Function Name: convolution (Kernel) *
// Description: - Execute direct(naive) convolution *
// - CUDA_global memory *
// Input file: none *
// Output file: none *
// Return: none *
//**********************************************************************
__global__ void convolution(unsigned char *image_d, unsigned char *output_d, float* filter, int imageGemmRgbSize, int filterSize, int channel)
{
int i, j, col;
int r, g, b;
col = blockIdx.x * blockDim.x + threadIdx.x; //image width *3
if (col < imageGemmRgbSize*channel)
{
r = 0;
g = 0;
b = 0;
for (i = 0; i < channel; i++)
{
for (j = 0; j < filterSize * filterSize; j++)
{
r += filter[i*channel + j] * image_d[col * filterSize * filterSize * 3]; //R
g += filter[i*channel + j] * image_d[col * filterSize * filterSize * 3 + 1]; //G
b += filter[i*channel + j] * image_d[col * filterSize * filterSize * 3 + 2]; //B
}
output_d[col * i * 3] = r;
output_d[col * i * 3 + 1] = g;
output_d[col * i * 3 + 2] = b;
}
}
}
//**********************************************************************
// Function Name: decode_image *
// Description: - read image in ppm format, read the data of array    *
// named frame[] *
// Input file: image file : viptrafficX.ppm *
// Output file: none *
// Return: 0 if success *
//**********************************************************************
int decode_image(unsigned char frame[HEIGHT * WIDTH * 3], char filename[])
{
FILE *pFile;
pFile = fopen(filename, "r");
fseek(pFile, 15L, SEEK_SET);//In ppm file, the first 15 bytes are content of "p6,120 160, 255", image data is from 16th bytes
fread(frame, sizeof(unsigned char), HEIGHT * WIDTH * 3 + 15, pFile);
fclose(pFile);
return 0;
}
//**********************************************************************
// Function Name:randomInit *
// Description: - Generate random values for a float array            *
// *
// Input file: none *
// Output file: none *
// Return: kernel file size *
//**********************************************************************
int randomInit(float* data, int size, int range) // random from 0/255 to 255/255
{
int i;
srand(time(NULL));
for (i = 0; i < size; i++)
{
data[i] = rand() % range / (float)range;
}
//for (i = 0; i < size; i++) printf("%f;", data[i]); // for debugging
return 0;
}
//**********************************************************************
// Function Name: transpose_gemm *
// Description: - transpose image to GEMM *
//                RGB channel                                          *
// Input file: none *
// Output file: none *
// Return: 0 if success *
//**********************************************************************
int transpose_gemm_rgb(unsigned char* input, unsigned char* output)
{
int i, j, k, step;
int convline = 0;
for (i = 0; i < (HEIGHT - FLTSIZE + 1); i+=STRIDE) // Height iteration
{
for (j = 0; j < (WIDTH - FLTSIZE + 1) * 3; j += (3*STRIDE)) //Width iteration
{
for (k = 0; k < FLTSIZE*FLTSIZE ; k ++)
{
step = (i*STRIDE + k / FLTSIZE)*WIDTH * 3 + (j*STRIDE + k%FLTSIZE);
//output_2D[convline][k]=input[step]; //2D, 1 channel
output[convline] = input[step]; //R
output[convline + 1] = input[step + 1]; //G
output[convline + 2] = input[step + 2]; //B
convline += 3;
}
}
}
return 0;
}
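/*
 * Illustrative check (not in the original source) of the sizes used in main() below, using the
 * standard output-size formula outDim = (inDim - F + 2*P)/S + 1:
 *
 *   convWidth  = (120 - 7 + 0)/2 + 1 = 57
 *   convHeight = (160 - 7 + 0)/2 + 1 = 77
 *   imageGemmRgbSize = 57 * 77 * 7 * 7 * 3 = 645183   (matches the comment in main)
 */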
//**********************************************************************
// Function Name:Main *
// Description: - Main function on host, configure the kernel parameter*
// and run kernel *
// Input file: none *
// Output file: none *
// Return: 0 if success *
//**********************************************************************
int main(void)
{
int filterSize = FLTSIZE;
int channel = CHANNEL;
int convWidth = (WIDTH - FLTSIZE + 2 * PADDING) / STRIDE + 1; //convolution width with padding
	int convHeight = (HEIGHT - FLTSIZE + 2 * PADDING) / STRIDE + 1; //convolution height with padding
int imageRgbSize = HEIGHT * WIDTH * 3; // value is 57600 when image 160*120
int imageGemmRgbSize = convWidth * convHeight * FLTSIZE * FLTSIZE * 3; //value is 645183 when image 160*120, filter =7, padding =0, stride =2
int outputSize = convHeight * FLTSIZE * FLTSIZE * CHANNEL * 3; //value is 1086624 when image 160*120, filter =7, padding =0, stride =2, channel =3
int imagecount = 0; //counter for 120 images
unsigned char *image_d, *output_d;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float kernelExecTime = 0;
float timer;
float* filter = (float*)malloc(FLTSIZE*FLTSIZE * CHANNEL * sizeof(float));
unsigned char* image = (unsigned char*)malloc(imageRgbSize * sizeof(unsigned char));
unsigned char* imageGemmRgb = (unsigned char*)malloc(imageGemmRgbSize * sizeof(unsigned char));
unsigned char* output = (unsigned char*)malloc(outputSize * sizeof(unsigned char));
randomInit(filter, FLTSIZE*FLTSIZE*CHANNEL, 255); //initialize filter
////for debugging
//int k;
//for (k = 0; k < FLTSIZE*FLTSIZE*CHANNEL; k++)
//{
// printf("filter[%d]: %f; ", k, filter[k]);
//}
	cudaMalloc((void**)&image_d, imageGemmRgbSize * sizeof(unsigned char));
	cudaMalloc((void**)&output_d, outputSize * sizeof(unsigned char));
	float* filter_d; // device copy of the filter; the kernel cannot dereference the host malloc'ed pointer
	cudaMalloc((void**)&filter_d, FLTSIZE*FLTSIZE * CHANNEL * sizeof(float));
	cudaMemcpy(filter_d, filter, FLTSIZE*FLTSIZE * CHANNEL * sizeof(float), cudaMemcpyHostToDevice);
while (imagecount < 120)
{
char filename[50];//file length upto 50
sprintf(filename, "images/viptraffic%d.ppm", imagecount);//read viptrafficX.ppm
decode_image(image, filename); //get image data from file
transpose_gemm_rgb(image, imageGemmRgb);
imagecount++;
////for debugging
//int k;
//for (k = 65000; k < 65100; k++) //value is 645183 when image 160*120, filter =7, stride =2
//{
// printf("image[%d]: %d; ", k, imageGemmRgb[k]);
//}
//Copy from host to device
cudaMemcpy(image_d, imageGemmRgb, imageGemmRgbSize, cudaMemcpyHostToDevice);
		dim3 dimBlock(BLOCKSIZE); // 1D launch: the kernel only uses the x index, and BLOCKSIZE*BLOCKSIZE threads would exceed the per-block limit
		dim3 dimGrid((imageGemmRgbSize + BLOCKSIZE - 1) / BLOCKSIZE);
		cudaEventRecord(start, 0);
		convolution <<<dimGrid, dimBlock >>> (image_d, output_d, filter_d, imageGemmRgbSize, filterSize, channel);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//Copy from device to host
cudaMemcpy(output, output_d, outputSize * sizeof(unsigned char), cudaMemcpyDeviceToHost);
////for debugging
//int k;
//for (k = 10000; k < 10010; k++)
//{
// printf("output[%d]: %f; ", k, output[k]);
//}
cudaEventElapsedTime(&timer, start, stop);
kernelExecTime += timer;
}
//Free memory allocation
	cudaFree(output_d);
	cudaFree(image_d);
	cudaFree(filter_d);
free(output);
free(image);
free(imageGemmRgb);
free(filter);
printf("Cumputing done! Golbal memory applied in CUDA.\n");
printf("Image amount:%d; Image size:%d x %d; Padding:%d; Stride:%d; Filter Size:%d.\n", imagecount, WIDTH, HEIGHT, PADDING, STRIDE, FLTSIZE);
printf("Kernel Execution time: %f milli seconds\n", kernelExecTime);
//system("pause");
return EXIT_SUCCESS;
}
|
a082065b9e53295848831873e52e5cdc5fc89f38.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#define DATATYPE int
#define SMEMSIZE 1024
#define REP 128
//#define conflictnum 1
__global__ void shared_model_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its, int conflictnum)
{
__shared__ DATATYPE smem1[SMEMSIZE];
__shared__ DATATYPE smem2[SMEMSIZE];
unsigned int tid=threadIdx.x;
while(tid<SMEMSIZE)
{
smem1[tid]=in1[tid];
smem2[tid]=in2[tid];
tid+=blockDim.x;
}
DATATYPE p,q=(threadIdx.x%conflictnum)*32+(threadIdx.x/conflictnum);
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
for (i=0;i<its;i++)
{
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
p=smem1[q];
q=smem2[p];
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
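/*
 * Illustrative note (not in the original source): the index pattern
 * q = (threadIdx.x % conflictnum) * 32 + (threadIdx.x / conflictnum) is what creates the shared
 * memory bank conflicts being measured, assuming 32 banks of 4-byte words:
 *
 *   conflictnum = 1  -> lanes 0..31 read words 0..31   -> 32 distinct banks, no conflict
 *   conflictnum = 2  -> lanes 0,1 read words 0,32      -> same bank, 2-way conflict
 *   conflictnum = 32 -> all 32 lanes hit one bank      -> 32-way conflict (fully serialized)
 */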
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2, int conflictnum)
{
int its=30;
DATATYPE *d_in1,*d_in2;
hipMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
hipMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
hipMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
hipMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,hipMemcpyHostToDevice);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
hipMalloc((void**)&d_time,sizeof(double)*blocks*threads);
hipMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
hipLaunchKernelGGL(( shared_model_1), dim3(blocks),dim3(threads), 0, 0, d_time,d_in1,d_in1,d_out,its, conflictnum);
hipMemcpy(h_time,d_time,sizeof(double)*blocks*threads,hipMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t\t%f\t%f\t%f\n", blocks,threads,avert,mint,maxt);
hipFree(d_time);
hipFree(d_out);
hipFree(d_in1);
hipFree(d_in2);
free(h_time);
return 0;
}
void init_order(DATATYPE *a,int n)
{
for (int i=0;i<n;i++)
{
a[i]=i;
}
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("%s <conflictnum> \n", argv[0]);
} else {
int value = atoi(argv[1]);
DATATYPE *h_in1;
h_in1=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE);
init_order(h_in1,SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
int blocks = 1;
for (int j = 0; j <= 512; j += 32) {
int threads = (j == 0 ? 1 : j);
main_test(blocks, threads, h_in1, h_in1, value);
}
free(h_in1);
}
return 0;
}
|
a082065b9e53295848831873e52e5cdc5fc89f38.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define DATATYPE int
#define SMEMSIZE 1024
#define REP 128
//#define conflictnum 1
__global__ void shared_model_1(double *time,DATATYPE *in1,DATATYPE *in2,DATATYPE *out,int its, int conflictnum)
{
__shared__ DATATYPE smem1[SMEMSIZE];
__shared__ DATATYPE smem2[SMEMSIZE];
unsigned int tid=threadIdx.x;
while(tid<SMEMSIZE)
{
smem1[tid]=in1[tid];
smem2[tid]=in2[tid];
tid+=blockDim.x;
}
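//index pattern for the latency test: each group of conflictnum consecutive threads in a warp reads words that fall into the same 4-byte shared-memory bank (for conflictnum up to 32), so every warp load incurs a conflictnum-way bank conflict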
DATATYPE p,q=(threadIdx.x%conflictnum)*32+(threadIdx.x/conflictnum);
double time_tmp=0.0;
unsigned int start_time=0,stop_time=0;
unsigned int i,j;
for (i=0;i<its;i++)
{
__syncthreads();
start_time=clock();
#pragma unroll
for (j=0;j<REP;j++)
{
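//dependent loads: each access uses the result of the previous one, so the timed loop measures shared-memory load latency rather than throughput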
p=smem1[q];
q=smem2[p];
}
stop_time=clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/REP/its;
out[blockDim.x*blockIdx.x+threadIdx.x] = p+q;
time[blockDim.x*blockIdx.x+threadIdx.x] = time_tmp;
}
int main_test(int blocks,int threads,DATATYPE *h_in1,DATATYPE *h_in2, int conflictnum)
{
int its=30;
DATATYPE *d_in1,*d_in2;
cudaMalloc((void**)&d_in1,sizeof(DATATYPE)*SMEMSIZE);
cudaMalloc((void**)&d_in2,sizeof(DATATYPE)*SMEMSIZE);
cudaMemcpy(d_in1,h_in1,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
cudaMemcpy(d_in2,h_in2,sizeof(DATATYPE)*SMEMSIZE,cudaMemcpyHostToDevice);
double *h_time,*d_time;
DATATYPE *d_out;
h_time=(double*)malloc(sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_time,sizeof(double)*blocks*threads);
cudaMalloc((void**)&d_out,sizeof(DATATYPE)*blocks*threads);
shared_model_1<<<blocks,threads>>>(d_time,d_in1,d_in1,d_out,its, conflictnum);
cudaMemcpy(h_time,d_time,sizeof(double)*blocks*threads,cudaMemcpyDeviceToHost);
double avert=0.0,maxt=0.0,mint=99999.9;
int nn=0;
for (int i=0;i<blocks;i++)
{
for (int j=0;j<threads;j+=32)
{
avert+=h_time[i*threads+j];
nn++;
if (maxt<h_time[i*threads+j])
{
maxt=h_time[i*threads+j];
}
if (mint>h_time[i*threads+j])
{
mint=h_time[i*threads+j];
}
}
}
avert/=nn;
printf("%d\t%d\t\t%f\t%f\t%f\n", blocks,threads,avert,mint,maxt);
cudaFree(d_time);
cudaFree(d_out);
cudaFree(d_in1);
cudaFree(d_in2);
free(h_time);
return 0;
}
void init_order(DATATYPE *a,int n)
{
for (int i=0;i<n;i++)
{
a[i]=i;
}
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("%s <conflictnum> \n", argv[0]);
} else {
int value = atoi(argv[1]);
DATATYPE *h_in1;
h_in1=(DATATYPE*)malloc(sizeof(DATATYPE)*SMEMSIZE);
init_order(h_in1,SMEMSIZE);
printf("blocks\t threads\t aver \t min \t max \t(clocks)\n");
int blocks = 1;
for (int j = 0; j <= 512; j += 32) {
int threads = (j == 0 ? 1 : j);
main_test(blocks, threads, h_in1, h_in1, value);
}
free(h_in1);
}
return 0;
}
|
47a68fbcabfd7d067487e69b5f63267e684b4fee.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2017 Darius Rckert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/time/timer.h"
#include "saiga/cuda/scan.h"
namespace Saiga {
namespace CUDA {
using uint = unsigned int;
void scanTest(){
CUDA_SYNC_CHECK_ERROR();
const bool exclusive = false;
const size_t THREADS_PER_BLOCK = 256;
const int TILES_PER_BLOCK = 8;
const int ELEMENTS_PER_VECTOR = 4;
const int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * TILES_PER_BLOCK * ELEMENTS_PER_VECTOR;
int N = 100 * 1000 * 1000;
std::cout << "Elements: " << N << " Elements per block: " << ELEMENTS_PER_BLOCK << std::endl;
size_t readWrites = N * 2 * sizeof(uint);
CUDA::PerformanceTestHelper pth("Scan (inclusive)", readWrites);
thrust::host_vector<uint> h(N,1);
for(int i = 0; i < N ; ++i){
h[i] = rand() % 4;
}
thrust::device_vector<uint> v = h;
thrust::device_vector<uint> d_res(N + ELEMENTS_PER_BLOCK,-1);
thrust::host_vector<uint> h_res(N + ELEMENTS_PER_BLOCK,-1);
thrust::device_vector<uint> aggregate( CUDA::getBlockCount(N, ELEMENTS_PER_BLOCK) + 1,-1);
{
float time;
{
ScopedTimer<float> t(&time);
if(exclusive){
int sum = 0;
for(int i = 0 ; i < N; ++i){
h_res[i] = sum;
sum += h[i];
}
}else{
int sum = 0;
for(int i = 0 ; i < N; ++i){
sum += h[i];
h_res[i] = sum;
}
}
}
pth.addMeassurement("CPU scan",time);
}
{
float time;
{
CUDA::CudaScopedTimer t(time);
if(exclusive){
thrust::exclusive_scan(v.begin(),v.end(),d_res.begin());
}else{
thrust::inclusive_scan(v.begin(),v.end(),d_res.begin());
}
}
// SAIGA_ASSERT(sum == res);
pth.addMeassurement("thrust::scan",time);
}
SAIGA_ASSERT(d_res == h_res);
{
d_res = thrust::device_vector<uint>(N + ELEMENTS_PER_BLOCK,-1);
float time;
auto NUM_BLOCKS = CUDA::getBlockCount(N,ELEMENTS_PER_BLOCK);
{
CUDA::CudaScopedTimer t(time);
hipLaunchKernelGGL(( CUDA::tiledSinglePassScan<exclusive,THREADS_PER_BLOCK,TILES_PER_BLOCK,int4,true>), dim3(NUM_BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, v,d_res,aggregate);
}
pth.addMeassurement("tiledSinglePassScan",time);
}
/*
//check if the aggregate was computed correctly
thrust::host_vector<unsigned int> h_a = aggregate;
int i = ELEMENTS_PER_BLOCK;
for(int ag : h_a){
// SAIGA_ASSERT(ag == i);
i += ELEMENTS_PER_BLOCK;
}
thrust::host_vector<unsigned int> h_res2 = d_res;
int maxPrint = ELEMENTS_PER_BLOCK * 2;
for(int i = 0 ; i < int(h_res.size()) ; ++i){
if(h_res2[i] != h_res[i]){
std::cout << i << " " << h_res2[i] << "!=" << h_res[i] << std::endl;
maxPrint--;
if(maxPrint < 0)
break;
}
}
*/
SAIGA_ASSERT(d_res == h_res);
{
float time;
{
CUDA::CudaScopedTimer t(time);
hipMemcpy(thrust::raw_pointer_cast(d_res.data()),thrust::raw_pointer_cast(v.data()),N * sizeof(int),hipMemcpyDeviceToDevice);
}
pth.addMeassurement("hipMemcpy",time);
}
CUDA_SYNC_CHECK_ERROR();
}
}
}
|
47a68fbcabfd7d067487e69b5f63267e684b4fee.cu
|
/**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/time/timer.h"
#include "saiga/cuda/scan.h"
namespace Saiga {
namespace CUDA {
using uint = unsigned int;
void scanTest(){
CUDA_SYNC_CHECK_ERROR();
const bool exclusive = false;
const size_t THREADS_PER_BLOCK = 256;
const int TILES_PER_BLOCK = 8;
const int ELEMENTS_PER_VECTOR = 4;
const int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * TILES_PER_BLOCK * ELEMENTS_PER_VECTOR;
int N = 100 * 1000 * 1000;
std::cout << "Elements: " << N << " Elements per block: " << ELEMENTS_PER_BLOCK << std::endl;
size_t readWrites = N * 2 * sizeof(uint);
CUDA::PerformanceTestHelper pth("Scan (inclusive)", readWrites);
thrust::host_vector<uint> h(N,1);
for(int i = 0; i < N ; ++i){
h[i] = rand() % 4;
}
thrust::device_vector<uint> v = h;
thrust::device_vector<uint> d_res(N + ELEMENTS_PER_BLOCK,-1);
thrust::host_vector<uint> h_res(N + ELEMENTS_PER_BLOCK,-1);
thrust::device_vector<uint> aggregate( CUDA::getBlockCount(N, ELEMENTS_PER_BLOCK) + 1,-1);
{
float time;
{
ScopedTimer<float> t(&time);
if(exclusive){
int sum = 0;
for(int i = 0 ; i < N; ++i){
h_res[i] = sum;
sum += h[i];
}
}else{
int sum = 0;
for(int i = 0 ; i < N; ++i){
sum += h[i];
h_res[i] = sum;
}
}
}
pth.addMeassurement("CPU scan",time);
}
{
float time;
{
CUDA::CudaScopedTimer t(time);
if(exclusive){
thrust::exclusive_scan(v.begin(),v.end(),d_res.begin());
}else{
thrust::inclusive_scan(v.begin(),v.end(),d_res.begin());
}
}
// SAIGA_ASSERT(sum == res);
pth.addMeassurement("thrust::scan",time);
}
SAIGA_ASSERT(d_res == h_res);
{
d_res = thrust::device_vector<uint>(N + ELEMENTS_PER_BLOCK,-1);
float time;
auto NUM_BLOCKS = CUDA::getBlockCount(N,ELEMENTS_PER_BLOCK);
{
CUDA::CudaScopedTimer t(time);
CUDA::tiledSinglePassScan<exclusive,THREADS_PER_BLOCK,TILES_PER_BLOCK,int4,true><<<NUM_BLOCKS,THREADS_PER_BLOCK>>>(v,d_res,aggregate);
}
pth.addMeassurement("tiledSinglePassScan",time);
}
/*
//check if the aggregate was computed correctly
thrust::host_vector<unsigned int> h_a = aggregate;
int i = ELEMENTS_PER_BLOCK;
for(int ag : h_a){
// SAIGA_ASSERT(ag == i);
i += ELEMENTS_PER_BLOCK;
}
thrust::host_vector<unsigned int> h_res2 = d_res;
int maxPrint = ELEMENTS_PER_BLOCK * 2;
for(int i = 0 ; i < int(h_res.size()) ; ++i){
if(h_res2[i] != h_res[i]){
std::cout << i << " " << h_res2[i] << "!=" << h_res[i] << std::endl;
maxPrint--;
if(maxPrint < 0)
break;
}
}
*/
SAIGA_ASSERT(d_res == h_res);
{
float time;
{
CUDA::CudaScopedTimer t(time);
cudaMemcpy(thrust::raw_pointer_cast(d_res.data()),thrust::raw_pointer_cast(v.data()),N * sizeof(int),cudaMemcpyDeviceToDevice);
}
pth.addMeassurement("cudaMemcpy",time);
}
CUDA_SYNC_CHECK_ERROR();
}
}
}
|